]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-2.6.32.58-201203162123.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.58-201203162123.patch
CommitLineData
cc0d37e9
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..4e87324 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,20 @@
6 *.a
7 *.aux
8 *.bin
9+*.c.[012].*
10+*.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 *.eps
18 *.fw
19+*.gcno
20 *.gen.S
21 *.gif
22+*.gmo
23 *.grep
24 *.grp
25 *.gz
26@@ -38,8 +43,10 @@
27 *.tab.h
28 *.tex
29 *.ver
30+*.vim
31 *.xml
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 *.9
37@@ -49,11 +56,16 @@
38 53c700_d.h
39 CVS
40 ChangeSet
41+GPATH
42+GRTAGS
43+GSYMS
44+GTAGS
45 Image
46 Kerntypes
47 Module.markers
48 Module.symvers
49 PENDING
50+PERF*
51 SCCS
52 System.map*
53 TAGS
54@@ -76,7 +88,11 @@ btfixupprep
55 build
56 bvmlinux
57 bzImage*
58+capability_names.h
59+capflags.c
60 classlist.h*
61+clut_vga16.c
62+common-cmds.h
63 comp*.log
64 compile.h*
65 conf
66@@ -84,6 +100,8 @@ config
67 config-*
68 config_data.h*
69 config_data.gz*
70+config.c
71+config.tmp
72 conmakehash
73 consolemap_deftbl.c*
74 cpustr.h
75@@ -97,19 +115,23 @@ elfconfig.h*
76 fixdep
77 fore200e_mkfirm
78 fore200e_pca_fw.c*
79+gate.lds
80 gconf
81 gen-devlist
82 gen_crc32table
83 gen_init_cpio
84 genksyms
85 *_gray256.c
86+hash
87+hid-example
88 ihex2fw
89 ikconfig.h*
90 initramfs_data.cpio
91+initramfs_data.cpio.bz2
92 initramfs_data.cpio.gz
93 initramfs_list
94 kallsyms
95-kconfig
96+kern_constants.h
97 keywords.c
98 ksym.c*
99 ksym.h*
100@@ -117,6 +139,7 @@ kxgettext
101 lkc_defs.h
102 lex.c
103 lex.*.c
104+lib1funcs.S
105 logo_*.c
106 logo_*_clut224.c
107 logo_*_mono.c
108@@ -127,13 +150,16 @@ machtypes.h
109 map
110 maui_boot.h
111 mconf
112+mdp
113 miboot*
114 mk_elfconfig
115 mkboot
116 mkbugboot
117 mkcpustr
118 mkdep
119+mkpiggy
120 mkprep
121+mkregtable
122 mktables
123 mktree
124 modpost
125@@ -149,6 +175,7 @@ patches*
126 pca200e.bin
127 pca200e_ecd.bin2
128 piggy.gz
129+piggy.S
130 piggyback
131 pnmtologo
132 ppc_defs.h*
133@@ -157,12 +184,15 @@ qconf
134 raid6altivec*.c
135 raid6int*.c
136 raid6tables.c
137+regdb.c
138 relocs
139+rlim_names.h
140 series
141 setup
142 setup.bin
143 setup.elf
144 sImage
145+slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149@@ -171,6 +201,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153+user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157@@ -186,14 +217,20 @@ version.h*
158 vmlinux
159 vmlinux-*
160 vmlinux.aout
161+vmlinux.bin.all
162+vmlinux.bin.bz2
163 vmlinux.lds
164+vmlinux.relocs
165+voffset.h
166 vsyscall.lds
167 vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171+utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177+zoffset.h
178diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
179index c840e7d..f4c451c 100644
180--- a/Documentation/kernel-parameters.txt
181+++ b/Documentation/kernel-parameters.txt
182@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
183 the specified number of seconds. This is to be used if
184 your oopses keep scrolling off the screen.
185
186+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
187+ virtualization environments that don't cope well with the
188+ expand down segment used by UDEREF on X86-32 or the frequent
189+ page table updates on X86-64.
190+
191+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
192+
193 pcbit= [HW,ISDN]
194
195 pcd. [PARIDE]
196diff --git a/MAINTAINERS b/MAINTAINERS
197index 613da5d..4fe3eda 100644
198--- a/MAINTAINERS
199+++ b/MAINTAINERS
200@@ -5725,6 +5725,14 @@ L: netdev@vger.kernel.org
201 S: Maintained
202 F: drivers/net/vmxnet3/
203
204+VMware PVSCSI driver
205+M: Alok Kataria <akataria@vmware.com>
206+M: VMware PV-Drivers <pv-drivers@vmware.com>
207+L: linux-scsi@vger.kernel.org
208+S: Maintained
209+F: drivers/scsi/vmw_pvscsi.c
210+F: drivers/scsi/vmw_pvscsi.h
211+
212 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
213 M: Liam Girdwood <lrg@slimlogic.co.uk>
214 M: Mark Brown <broonie@opensource.wolfsonmicro.com>
215diff --git a/Makefile b/Makefile
216index ed78982..1074149 100644
217--- a/Makefile
218+++ b/Makefile
219@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
220
221 HOSTCC = gcc
222 HOSTCXX = g++
223-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
224-HOSTCXXFLAGS = -O2
225+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
226+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
227+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
228
229 # Decide whether to build built-in, modular, or both.
230 # Normally, just do built-in.
231@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
232 # Rules shared between *config targets and build targets
233
234 # Basic helpers built in scripts/
235-PHONY += scripts_basic
236-scripts_basic:
237+PHONY += scripts_basic gcc-plugins
238+scripts_basic: gcc-plugins
239 $(Q)$(MAKE) $(build)=scripts/basic
240
241 # To avoid any implicit rule to kick in, define an empty command.
242@@ -403,7 +404,7 @@ endif
243 # of make so .config is not included in this case either (for *config).
244
245 no-dot-config-targets := clean mrproper distclean \
246- cscope TAGS tags help %docs check% \
247+ cscope gtags TAGS tags help %docs check% \
248 include/linux/version.h headers_% \
249 kernelrelease kernelversion
250
251@@ -526,6 +527,53 @@ else
252 KBUILD_CFLAGS += -O2
253 endif
254
255+ifndef DISABLE_PAX_PLUGINS
256+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
257+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
258+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
259+endif
260+ifdef CONFIG_PAX_MEMORY_STACKLEAK
261+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
262+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
263+endif
264+ifdef CONFIG_KALLOCSTAT_PLUGIN
265+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
266+endif
267+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
268+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
269+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
270+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
271+endif
272+ifdef CONFIG_CHECKER_PLUGIN
273+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
274+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
275+endif
276+endif
277+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
278+ifdef CONFIG_PAX_SIZE_OVERFLOW
279+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
280+endif
281+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
282+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
283+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
284+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
285+ifeq ($(KBUILD_EXTMOD),)
286+gcc-plugins:
287+ $(Q)$(MAKE) $(build)=tools/gcc
288+else
289+gcc-plugins: ;
290+endif
291+else
292+gcc-plugins:
293+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
294+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
295+else
296+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
297+endif
298+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
299+endif
300+endif
301+
302 include $(srctree)/arch/$(SRCARCH)/Makefile
303
304 ifneq ($(CONFIG_FRAME_WARN),0)
305@@ -647,7 +695,7 @@ export mod_strip_cmd
306
307
308 ifeq ($(KBUILD_EXTMOD),)
309-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
310+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
311
312 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
313 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
314@@ -868,6 +916,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
315
316 # The actual objects are generated when descending,
317 # make sure no implicit rule kicks in
318+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
319+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
320 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
321
322 # Handle descending into subdirectories listed in $(vmlinux-dirs)
323@@ -877,7 +927,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
324 # Error messages still appears in the original language
325
326 PHONY += $(vmlinux-dirs)
327-$(vmlinux-dirs): prepare scripts
328+$(vmlinux-dirs): gcc-plugins prepare scripts
329 $(Q)$(MAKE) $(build)=$@
330
331 # Build the kernel release string
332@@ -986,6 +1036,7 @@ prepare0: archprepare FORCE
333 $(Q)$(MAKE) $(build)=. missing-syscalls
334
335 # All the preparing..
336+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
337 prepare: prepare0
338
339 # The asm symlink changes when $(ARCH) changes.
340@@ -1127,6 +1178,8 @@ all: modules
341 # using awk while concatenating to the final file.
342
343 PHONY += modules
344+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
345+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
346 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
347 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
348 @$(kecho) ' Building modules, stage 2.';
349@@ -1136,7 +1189,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
350
351 # Target to prepare building external modules
352 PHONY += modules_prepare
353-modules_prepare: prepare scripts
354+modules_prepare: gcc-plugins prepare scripts
355
356 # Target to install modules
357 PHONY += modules_install
358@@ -1201,7 +1254,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
359 include/linux/autoconf.h include/linux/version.h \
360 include/linux/utsrelease.h \
361 include/linux/bounds.h include/asm*/asm-offsets.h \
362- Module.symvers Module.markers tags TAGS cscope*
363+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
364
365 # clean - Delete most, but leave enough to build external modules
366 #
367@@ -1245,7 +1298,7 @@ distclean: mrproper
368 @find $(srctree) $(RCS_FIND_IGNORE) \
369 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
370 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
371- -o -name '.*.rej' -o -size 0 \
372+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
373 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
374 -type f -print | xargs rm -f
375
376@@ -1292,6 +1345,7 @@ help:
377 @echo ' modules_prepare - Set up for building external modules'
378 @echo ' tags/TAGS - Generate tags file for editors'
379 @echo ' cscope - Generate cscope index'
380+ @echo ' gtags - Generate GNU GLOBAL index'
381 @echo ' kernelrelease - Output the release version string'
382 @echo ' kernelversion - Output the version stored in Makefile'
383 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
384@@ -1393,6 +1447,8 @@ PHONY += $(module-dirs) modules
385 $(module-dirs): crmodverdir $(objtree)/Module.symvers
386 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
387
388+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390 modules: $(module-dirs)
391 @$(kecho) ' Building modules, stage 2.';
392 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
393@@ -1448,7 +1504,7 @@ endif # KBUILD_EXTMOD
394 quiet_cmd_tags = GEN $@
395 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
396
397-tags TAGS cscope: FORCE
398+tags TAGS cscope gtags: FORCE
399 $(call cmd,tags)
400
401 # Scripts to check various things for consistency
402@@ -1513,17 +1569,21 @@ else
403 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
404 endif
405
406-%.s: %.c prepare scripts FORCE
407+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409+%.s: %.c gcc-plugins prepare scripts FORCE
410 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
411 %.i: %.c prepare scripts FORCE
412 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
413-%.o: %.c prepare scripts FORCE
414+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
415+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
416+%.o: %.c gcc-plugins prepare scripts FORCE
417 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
418 %.lst: %.c prepare scripts FORCE
419 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
420-%.s: %.S prepare scripts FORCE
421+%.s: %.S gcc-plugins prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423-%.o: %.S prepare scripts FORCE
424+%.o: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426 %.symtypes: %.c prepare scripts FORCE
427 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
428@@ -1533,11 +1593,15 @@ endif
429 $(cmd_crmodverdir)
430 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
431 $(build)=$(build-dir)
432-%/: prepare scripts FORCE
433+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
434+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
435+%/: gcc-plugins prepare scripts FORCE
436 $(cmd_crmodverdir)
437 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
438 $(build)=$(build-dir)
439-%.ko: prepare scripts FORCE
440+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
441+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
442+%.ko: gcc-plugins prepare scripts FORCE
443 $(cmd_crmodverdir)
444 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
445 $(build)=$(build-dir) $(@:.ko=.o)
446diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
447index 610dff4..f396854 100644
448--- a/arch/alpha/include/asm/atomic.h
449+++ b/arch/alpha/include/asm/atomic.h
450@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
451 #define atomic_dec(v) atomic_sub(1,(v))
452 #define atomic64_dec(v) atomic64_sub(1,(v))
453
454+#define atomic64_read_unchecked(v) atomic64_read(v)
455+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
456+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
457+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
458+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
459+#define atomic64_inc_unchecked(v) atomic64_inc(v)
460+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
461+#define atomic64_dec_unchecked(v) atomic64_dec(v)
462+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
463+
464 #define smp_mb__before_atomic_dec() smp_mb()
465 #define smp_mb__after_atomic_dec() smp_mb()
466 #define smp_mb__before_atomic_inc() smp_mb()
467diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
468index f199e69..af005f5 100644
469--- a/arch/alpha/include/asm/cache.h
470+++ b/arch/alpha/include/asm/cache.h
471@@ -4,19 +4,20 @@
472 #ifndef __ARCH_ALPHA_CACHE_H
473 #define __ARCH_ALPHA_CACHE_H
474
475+#include <linux/const.h>
476
477 /* Bytes per L1 (data) cache line. */
478 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
479-# define L1_CACHE_BYTES 64
480 # define L1_CACHE_SHIFT 6
481 #else
482 /* Both EV4 and EV5 are write-through, read-allocate,
483 direct-mapped, physical.
484 */
485-# define L1_CACHE_BYTES 32
486 # define L1_CACHE_SHIFT 5
487 #endif
488
489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
490+
491 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
492 #define SMP_CACHE_BYTES L1_CACHE_BYTES
493
494diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
495index 5c75c1b..c82f878 100644
496--- a/arch/alpha/include/asm/elf.h
497+++ b/arch/alpha/include/asm/elf.h
498@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
499
500 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
501
502+#ifdef CONFIG_PAX_ASLR
503+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
504+
505+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
506+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
507+#endif
508+
509 /* $0 is set by ld.so to a pointer to a function which might be
510 registered using atexit. This provides a mean for the dynamic
511 linker to call DT_FINI functions for shared libraries that have
512diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
513index 3f0c59f..cf1e100 100644
514--- a/arch/alpha/include/asm/pgtable.h
515+++ b/arch/alpha/include/asm/pgtable.h
516@@ -101,6 +101,17 @@ struct vm_area_struct;
517 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
518 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
519 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
520+
521+#ifdef CONFIG_PAX_PAGEEXEC
522+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
523+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
524+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
525+#else
526+# define PAGE_SHARED_NOEXEC PAGE_SHARED
527+# define PAGE_COPY_NOEXEC PAGE_COPY
528+# define PAGE_READONLY_NOEXEC PAGE_READONLY
529+#endif
530+
531 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
532
533 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
534diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
535index ebc3c89..20cfa63 100644
536--- a/arch/alpha/kernel/module.c
537+++ b/arch/alpha/kernel/module.c
538@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
539
540 /* The small sections were sorted to the end of the segment.
541 The following should definitely cover them. */
542- gp = (u64)me->module_core + me->core_size - 0x8000;
543+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
544 got = sechdrs[me->arch.gotsecindex].sh_addr;
545
546 for (i = 0; i < n; i++) {
547diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
548index a94e49c..d71dd44 100644
549--- a/arch/alpha/kernel/osf_sys.c
550+++ b/arch/alpha/kernel/osf_sys.c
551@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
552 /* At this point: (!vma || addr < vma->vm_end). */
553 if (limit - len < addr)
554 return -ENOMEM;
555- if (!vma || addr + len <= vma->vm_start)
556+ if (check_heap_stack_gap(vma, addr, len))
557 return addr;
558 addr = vma->vm_end;
559 vma = vma->vm_next;
560@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
561 merely specific addresses, but regions of memory -- perhaps
562 this feature should be incorporated into all ports? */
563
564+#ifdef CONFIG_PAX_RANDMMAP
565+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
566+#endif
567+
568 if (addr) {
569 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
570 if (addr != (unsigned long) -ENOMEM)
571@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
572 }
573
574 /* Next, try allocating at TASK_UNMAPPED_BASE. */
575- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
576- len, limit);
577+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
578+
579 if (addr != (unsigned long) -ENOMEM)
580 return addr;
581
582diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
583index 00a31de..2ded0f2 100644
584--- a/arch/alpha/mm/fault.c
585+++ b/arch/alpha/mm/fault.c
586@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
587 __reload_thread(pcb);
588 }
589
590+#ifdef CONFIG_PAX_PAGEEXEC
591+/*
592+ * PaX: decide what to do with offenders (regs->pc = fault address)
593+ *
594+ * returns 1 when task should be killed
595+ * 2 when patched PLT trampoline was detected
596+ * 3 when unpatched PLT trampoline was detected
597+ */
598+static int pax_handle_fetch_fault(struct pt_regs *regs)
599+{
600+
601+#ifdef CONFIG_PAX_EMUPLT
602+ int err;
603+
604+ do { /* PaX: patched PLT emulation #1 */
605+ unsigned int ldah, ldq, jmp;
606+
607+ err = get_user(ldah, (unsigned int *)regs->pc);
608+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
609+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
610+
611+ if (err)
612+ break;
613+
614+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
615+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
616+ jmp == 0x6BFB0000U)
617+ {
618+ unsigned long r27, addr;
619+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
620+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
621+
622+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
623+ err = get_user(r27, (unsigned long *)addr);
624+ if (err)
625+ break;
626+
627+ regs->r27 = r27;
628+ regs->pc = r27;
629+ return 2;
630+ }
631+ } while (0);
632+
633+ do { /* PaX: patched PLT emulation #2 */
634+ unsigned int ldah, lda, br;
635+
636+ err = get_user(ldah, (unsigned int *)regs->pc);
637+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
638+ err |= get_user(br, (unsigned int *)(regs->pc+8));
639+
640+ if (err)
641+ break;
642+
643+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
644+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
645+ (br & 0xFFE00000U) == 0xC3E00000U)
646+ {
647+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
648+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
649+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
650+
651+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
652+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
653+ return 2;
654+ }
655+ } while (0);
656+
657+ do { /* PaX: unpatched PLT emulation */
658+ unsigned int br;
659+
660+ err = get_user(br, (unsigned int *)regs->pc);
661+
662+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
663+ unsigned int br2, ldq, nop, jmp;
664+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
665+
666+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
667+ err = get_user(br2, (unsigned int *)addr);
668+ err |= get_user(ldq, (unsigned int *)(addr+4));
669+ err |= get_user(nop, (unsigned int *)(addr+8));
670+ err |= get_user(jmp, (unsigned int *)(addr+12));
671+ err |= get_user(resolver, (unsigned long *)(addr+16));
672+
673+ if (err)
674+ break;
675+
676+ if (br2 == 0xC3600000U &&
677+ ldq == 0xA77B000CU &&
678+ nop == 0x47FF041FU &&
679+ jmp == 0x6B7B0000U)
680+ {
681+ regs->r28 = regs->pc+4;
682+ regs->r27 = addr+16;
683+ regs->pc = resolver;
684+ return 3;
685+ }
686+ }
687+ } while (0);
688+#endif
689+
690+ return 1;
691+}
692+
693+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
694+{
695+ unsigned long i;
696+
697+ printk(KERN_ERR "PAX: bytes at PC: ");
698+ for (i = 0; i < 5; i++) {
699+ unsigned int c;
700+ if (get_user(c, (unsigned int *)pc+i))
701+ printk(KERN_CONT "???????? ");
702+ else
703+ printk(KERN_CONT "%08x ", c);
704+ }
705+ printk("\n");
706+}
707+#endif
708
709 /*
710 * This routine handles page faults. It determines the address,
711@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
712 good_area:
713 si_code = SEGV_ACCERR;
714 if (cause < 0) {
715- if (!(vma->vm_flags & VM_EXEC))
716+ if (!(vma->vm_flags & VM_EXEC)) {
717+
718+#ifdef CONFIG_PAX_PAGEEXEC
719+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
720+ goto bad_area;
721+
722+ up_read(&mm->mmap_sem);
723+ switch (pax_handle_fetch_fault(regs)) {
724+
725+#ifdef CONFIG_PAX_EMUPLT
726+ case 2:
727+ case 3:
728+ return;
729+#endif
730+
731+ }
732+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
733+ do_group_exit(SIGKILL);
734+#else
735 goto bad_area;
736+#endif
737+
738+ }
739 } else if (!cause) {
740 /* Allow reads even for write-only mappings */
741 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
742diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
743index b68faef..6dd1496 100644
744--- a/arch/arm/Kconfig
745+++ b/arch/arm/Kconfig
746@@ -14,6 +14,7 @@ config ARM
747 select SYS_SUPPORTS_APM_EMULATION
748 select HAVE_OPROFILE
749 select HAVE_ARCH_KGDB
750+ select GENERIC_ATOMIC64
751 select HAVE_KPROBES if (!XIP_KERNEL)
752 select HAVE_KRETPROBES if (HAVE_KPROBES)
753 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
754diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
755index d0daeab..ca7e10e 100644
756--- a/arch/arm/include/asm/atomic.h
757+++ b/arch/arm/include/asm/atomic.h
758@@ -15,6 +15,10 @@
759 #include <linux/types.h>
760 #include <asm/system.h>
761
762+#ifdef CONFIG_GENERIC_ATOMIC64
763+#include <asm-generic/atomic64.h>
764+#endif
765+
766 #define ATOMIC_INIT(i) { (i) }
767
768 #ifdef __KERNEL__
769@@ -24,8 +28,16 @@
770 * strex/ldrex monitor on some implementations. The reason we can use it for
771 * atomic_set() is the clrex or dummy strex done on every exception return.
772 */
773-#define atomic_read(v) ((v)->counter)
774+#define atomic_read(v) (*(volatile int *)&(v)->counter)
775+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
776+{
777+ return v->counter;
778+}
779 #define atomic_set(v,i) (((v)->counter) = (i))
780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
781+{
782+ v->counter = i;
783+}
784
785 #if __LINUX_ARM_ARCH__ >= 6
786
787@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
788 int result;
789
790 __asm__ __volatile__("@ atomic_add\n"
791+"1: ldrex %1, [%2]\n"
792+" add %0, %1, %3\n"
793+
794+#ifdef CONFIG_PAX_REFCOUNT
795+" bvc 3f\n"
796+"2: bkpt 0xf103\n"
797+"3:\n"
798+#endif
799+
800+" strex %1, %0, [%2]\n"
801+" teq %1, #0\n"
802+" bne 1b"
803+
804+#ifdef CONFIG_PAX_REFCOUNT
805+"\n4:\n"
806+ _ASM_EXTABLE(2b, 4b)
807+#endif
808+
809+ : "=&r" (result), "=&r" (tmp)
810+ : "r" (&v->counter), "Ir" (i)
811+ : "cc");
812+}
813+
814+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
815+{
816+ unsigned long tmp;
817+ int result;
818+
819+ __asm__ __volatile__("@ atomic_add_unchecked\n"
820 "1: ldrex %0, [%2]\n"
821 " add %0, %0, %3\n"
822 " strex %1, %0, [%2]\n"
823@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
824 smp_mb();
825
826 __asm__ __volatile__("@ atomic_add_return\n"
827+"1: ldrex %1, [%2]\n"
828+" add %0, %1, %3\n"
829+
830+#ifdef CONFIG_PAX_REFCOUNT
831+" bvc 3f\n"
832+" mov %0, %1\n"
833+"2: bkpt 0xf103\n"
834+"3:\n"
835+#endif
836+
837+" strex %1, %0, [%2]\n"
838+" teq %1, #0\n"
839+" bne 1b"
840+
841+#ifdef CONFIG_PAX_REFCOUNT
842+"\n4:\n"
843+ _ASM_EXTABLE(2b, 4b)
844+#endif
845+
846+ : "=&r" (result), "=&r" (tmp)
847+ : "r" (&v->counter), "Ir" (i)
848+ : "cc");
849+
850+ smp_mb();
851+
852+ return result;
853+}
854+
855+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
856+{
857+ unsigned long tmp;
858+ int result;
859+
860+ smp_mb();
861+
862+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
863 "1: ldrex %0, [%2]\n"
864 " add %0, %0, %3\n"
865 " strex %1, %0, [%2]\n"
866@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
867 int result;
868
869 __asm__ __volatile__("@ atomic_sub\n"
870+"1: ldrex %1, [%2]\n"
871+" sub %0, %1, %3\n"
872+
873+#ifdef CONFIG_PAX_REFCOUNT
874+" bvc 3f\n"
875+"2: bkpt 0xf103\n"
876+"3:\n"
877+#endif
878+
879+" strex %1, %0, [%2]\n"
880+" teq %1, #0\n"
881+" bne 1b"
882+
883+#ifdef CONFIG_PAX_REFCOUNT
884+"\n4:\n"
885+ _ASM_EXTABLE(2b, 4b)
886+#endif
887+
888+ : "=&r" (result), "=&r" (tmp)
889+ : "r" (&v->counter), "Ir" (i)
890+ : "cc");
891+}
892+
893+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
894+{
895+ unsigned long tmp;
896+ int result;
897+
898+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
899 "1: ldrex %0, [%2]\n"
900 " sub %0, %0, %3\n"
901 " strex %1, %0, [%2]\n"
902@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
903 smp_mb();
904
905 __asm__ __volatile__("@ atomic_sub_return\n"
906-"1: ldrex %0, [%2]\n"
907-" sub %0, %0, %3\n"
908+"1: ldrex %1, [%2]\n"
909+" sub %0, %1, %3\n"
910+
911+#ifdef CONFIG_PAX_REFCOUNT
912+" bvc 3f\n"
913+" mov %0, %1\n"
914+"2: bkpt 0xf103\n"
915+"3:\n"
916+#endif
917+
918 " strex %1, %0, [%2]\n"
919 " teq %1, #0\n"
920 " bne 1b"
921+
922+#ifdef CONFIG_PAX_REFCOUNT
923+"\n4:\n"
924+ _ASM_EXTABLE(2b, 4b)
925+#endif
926+
927 : "=&r" (result), "=&r" (tmp)
928 : "r" (&v->counter), "Ir" (i)
929 : "cc");
930@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
931 return oldval;
932 }
933
934+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
935+{
936+ unsigned long oldval, res;
937+
938+ smp_mb();
939+
940+ do {
941+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
942+ "ldrex %1, [%2]\n"
943+ "mov %0, #0\n"
944+ "teq %1, %3\n"
945+ "strexeq %0, %4, [%2]\n"
946+ : "=&r" (res), "=&r" (oldval)
947+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
948+ : "cc");
949+ } while (res);
950+
951+ smp_mb();
952+
953+ return oldval;
954+}
955+
956 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
957 {
958 unsigned long tmp, tmp2;
959@@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
960 #endif /* __LINUX_ARM_ARCH__ */
961
962 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
963+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
964+{
965+ return xchg(&v->counter, new);
966+}
967
968 static inline int atomic_add_unless(atomic_t *v, int a, int u)
969 {
970@@ -220,11 +366,27 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
971 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
972
973 #define atomic_inc(v) atomic_add(1, v)
974+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
975+{
976+ atomic_add_unchecked(1, v);
977+}
978 #define atomic_dec(v) atomic_sub(1, v)
979+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
980+{
981+ atomic_sub_unchecked(1, v);
982+}
983
984 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
985+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
986+{
987+ return atomic_add_return_unchecked(1, v) == 0;
988+}
989 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
990 #define atomic_inc_return(v) (atomic_add_return(1, v))
991+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
992+{
993+ return atomic_add_return_unchecked(1, v);
994+}
995 #define atomic_dec_return(v) (atomic_sub_return(1, v))
996 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
997
998diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
999index 66c160b..bca1449 100644
1000--- a/arch/arm/include/asm/cache.h
1001+++ b/arch/arm/include/asm/cache.h
1002@@ -4,8 +4,10 @@
1003 #ifndef __ASMARM_CACHE_H
1004 #define __ASMARM_CACHE_H
1005
1006+#include <linux/const.h>
1007+
1008 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1009-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1010+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1011
1012 /*
1013 * Memory returned by kmalloc() may be used for DMA, so we must make
1014diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1015index 3d0cdd2..19957c5 100644
1016--- a/arch/arm/include/asm/cacheflush.h
1017+++ b/arch/arm/include/asm/cacheflush.h
1018@@ -216,13 +216,13 @@ struct cpu_cache_fns {
1019 void (*dma_inv_range)(const void *, const void *);
1020 void (*dma_clean_range)(const void *, const void *);
1021 void (*dma_flush_range)(const void *, const void *);
1022-};
1023+} __no_const;
1024
1025 struct outer_cache_fns {
1026 void (*inv_range)(unsigned long, unsigned long);
1027 void (*clean_range)(unsigned long, unsigned long);
1028 void (*flush_range)(unsigned long, unsigned long);
1029-};
1030+} __no_const;
1031
1032 /*
1033 * Select the calling method
1034diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1035index 6aac3f5..265536b 100644
1036--- a/arch/arm/include/asm/elf.h
1037+++ b/arch/arm/include/asm/elf.h
1038@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1039 the loader. We need to make sure that it is out of the way of the program
1040 that it will "exec", and that there is sufficient room for the brk. */
1041
1042-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1043+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1044+
1045+#ifdef CONFIG_PAX_ASLR
1046+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1047+
1048+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1049+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1050+#endif
1051
1052 /* When the program starts, a1 contains a pointer to a function to be
1053 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1054diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1055index c019949..388fdd1 100644
1056--- a/arch/arm/include/asm/kmap_types.h
1057+++ b/arch/arm/include/asm/kmap_types.h
1058@@ -19,6 +19,7 @@ enum km_type {
1059 KM_SOFTIRQ0,
1060 KM_SOFTIRQ1,
1061 KM_L2_CACHE,
1062+ KM_CLEARPAGE,
1063 KM_TYPE_NR
1064 };
1065
1066diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1067index 3a32af4..c8def8a 100644
1068--- a/arch/arm/include/asm/page.h
1069+++ b/arch/arm/include/asm/page.h
1070@@ -122,7 +122,7 @@ struct cpu_user_fns {
1071 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1072 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1073 unsigned long vaddr);
1074-};
1075+} __no_const;
1076
1077 #ifdef MULTI_USER
1078 extern struct cpu_user_fns cpu_user;
1079diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1080index d65b2f5..9d87555 100644
1081--- a/arch/arm/include/asm/system.h
1082+++ b/arch/arm/include/asm/system.h
1083@@ -86,6 +86,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
1084
1085 #define xchg(ptr,x) \
1086 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1087+#define xchg_unchecked(ptr,x) \
1088+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1089
1090 extern asmlinkage void __backtrace(void);
1091 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1092@@ -98,7 +100,7 @@ extern int cpu_architecture(void);
1093 extern void cpu_init(void);
1094
1095 void arm_machine_restart(char mode, const char *cmd);
1096-extern void (*arm_pm_restart)(char str, const char *cmd);
1097+extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1098
1099 #define UDBG_UNDEFINED (1 << 0)
1100 #define UDBG_SYSCALL (1 << 1)
1101@@ -505,6 +507,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1102
1103 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1104
1105+#define _ASM_EXTABLE(from, to) \
1106+" .pushsection __ex_table,\"a\"\n"\
1107+" .align 3\n" \
1108+" .long " #from ", " #to"\n" \
1109+" .popsection"
1110+
1111+
1112 #endif /* __ASSEMBLY__ */
1113
1114 #define arch_align_stack(x) (x)
1115diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1116index 1d6bd40..fba0cb9 100644
1117--- a/arch/arm/include/asm/uaccess.h
1118+++ b/arch/arm/include/asm/uaccess.h
1119@@ -22,6 +22,8 @@
1120 #define VERIFY_READ 0
1121 #define VERIFY_WRITE 1
1122
1123+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1124+
1125 /*
1126 * The exception table consists of pairs of addresses: the first is the
1127 * address of an instruction that is allowed to fault, and the second is
1128@@ -387,8 +389,23 @@ do { \
1129
1130
1131 #ifdef CONFIG_MMU
1132-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1133-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1134+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1135+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1136+
1137+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1138+{
1139+ if (!__builtin_constant_p(n))
1140+ check_object_size(to, n, false);
1141+ return ___copy_from_user(to, from, n);
1142+}
1143+
1144+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1145+{
1146+ if (!__builtin_constant_p(n))
1147+ check_object_size(from, n, true);
1148+ return ___copy_to_user(to, from, n);
1149+}
1150+
1151 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1152 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1153 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1154@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1155
1156 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1157 {
1158+ if ((long)n < 0)
1159+ return n;
1160+
1161 if (access_ok(VERIFY_READ, from, n))
1162 n = __copy_from_user(to, from, n);
1163 else /* security hole - plug it */
1164@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1165
1166 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1167 {
1168+ if ((long)n < 0)
1169+ return n;
1170+
1171 if (access_ok(VERIFY_WRITE, to, n))
1172 n = __copy_to_user(to, from, n);
1173 return n;
1174diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1175index 0e62770..e2c2cd6 100644
1176--- a/arch/arm/kernel/armksyms.c
1177+++ b/arch/arm/kernel/armksyms.c
1178@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1179 #ifdef CONFIG_MMU
1180 EXPORT_SYMBOL(copy_page);
1181
1182-EXPORT_SYMBOL(__copy_from_user);
1183-EXPORT_SYMBOL(__copy_to_user);
1184+EXPORT_SYMBOL(___copy_from_user);
1185+EXPORT_SYMBOL(___copy_to_user);
1186 EXPORT_SYMBOL(__clear_user);
1187
1188 EXPORT_SYMBOL(__get_user_1);
1189diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
1190index ba8ccfe..2dc34dc 100644
1191--- a/arch/arm/kernel/kgdb.c
1192+++ b/arch/arm/kernel/kgdb.c
1193@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
1194 * and we handle the normal undef case within the do_undefinstr
1195 * handler.
1196 */
1197-struct kgdb_arch arch_kgdb_ops = {
1198+const struct kgdb_arch arch_kgdb_ops = {
1199 #ifndef __ARMEB__
1200 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
1201 #else /* ! __ARMEB__ */
1202diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1203index 61f90d3..771ab27 100644
1204--- a/arch/arm/kernel/process.c
1205+++ b/arch/arm/kernel/process.c
1206@@ -83,7 +83,7 @@ static int __init hlt_setup(char *__unused)
1207 __setup("nohlt", nohlt_setup);
1208 __setup("hlt", hlt_setup);
1209
1210-void arm_machine_restart(char mode, const char *cmd)
1211+__noreturn void arm_machine_restart(char mode, const char *cmd)
1212 {
1213 /*
1214 * Clean and disable cache, and turn off interrupts
1215@@ -117,7 +117,7 @@ void arm_machine_restart(char mode, const char *cmd)
1216 void (*pm_power_off)(void);
1217 EXPORT_SYMBOL(pm_power_off);
1218
1219-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1220+void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1221 EXPORT_SYMBOL_GPL(arm_pm_restart);
1222
1223
1224@@ -195,6 +195,7 @@ __setup("reboot=", reboot_setup);
1225
1226 void machine_halt(void)
1227 {
1228+ BUG();
1229 }
1230
1231
1232@@ -202,6 +203,7 @@ void machine_power_off(void)
1233 {
1234 if (pm_power_off)
1235 pm_power_off();
1236+ BUG();
1237 }
1238
1239 void machine_restart(char *cmd)
1240diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1241index c6c57b6..0c3b29e 100644
1242--- a/arch/arm/kernel/setup.c
1243+++ b/arch/arm/kernel/setup.c
1244@@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
1245 struct processor processor;
1246 #endif
1247 #ifdef MULTI_TLB
1248-struct cpu_tlb_fns cpu_tlb;
1249+struct cpu_tlb_fns cpu_tlb __read_only;
1250 #endif
1251 #ifdef MULTI_USER
1252-struct cpu_user_fns cpu_user;
1253+struct cpu_user_fns cpu_user __read_only;
1254 #endif
1255 #ifdef MULTI_CACHE
1256-struct cpu_cache_fns cpu_cache;
1257+struct cpu_cache_fns cpu_cache __read_only;
1258 #endif
1259 #ifdef CONFIG_OUTER_CACHE
1260-struct outer_cache_fns outer_cache;
1261+struct outer_cache_fns outer_cache __read_only;
1262 #endif
1263
1264 struct stack {
1265diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1266index 3f361a7..6e806e1 100644
1267--- a/arch/arm/kernel/traps.c
1268+++ b/arch/arm/kernel/traps.c
1269@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
1270
1271 DEFINE_SPINLOCK(die_lock);
1272
1273+extern void gr_handle_kernel_exploit(void);
1274+
1275 /*
1276 * This function is protected against re-entrancy.
1277 */
1278@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
1279 if (panic_on_oops)
1280 panic("Fatal exception");
1281
1282+ gr_handle_kernel_exploit();
1283+
1284 do_exit(SIGSEGV);
1285 }
1286
1287diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
1288index aecf87df..bed731b 100644
1289--- a/arch/arm/kernel/vmlinux.lds.S
1290+++ b/arch/arm/kernel/vmlinux.lds.S
1291@@ -74,14 +74,18 @@ SECTIONS
1292 #ifndef CONFIG_XIP_KERNEL
1293 __init_begin = _stext;
1294 INIT_DATA
1295+ EXIT_TEXT
1296+ EXIT_DATA
1297 . = ALIGN(PAGE_SIZE);
1298 __init_end = .;
1299 #endif
1300 }
1301
1302 /DISCARD/ : { /* Exit code and data */
1303+#ifdef CONFIG_XIP_KERNEL
1304 EXIT_TEXT
1305 EXIT_DATA
1306+#endif
1307 *(.exitcall.exit)
1308 *(.discard)
1309 *(.ARM.exidx.exit.text)
1310diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1311index e4fe124..0fc246b 100644
1312--- a/arch/arm/lib/copy_from_user.S
1313+++ b/arch/arm/lib/copy_from_user.S
1314@@ -16,7 +16,7 @@
1315 /*
1316 * Prototype:
1317 *
1318- * size_t __copy_from_user(void *to, const void *from, size_t n)
1319+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1320 *
1321 * Purpose:
1322 *
1323@@ -84,11 +84,11 @@
1324
1325 .text
1326
1327-ENTRY(__copy_from_user)
1328+ENTRY(___copy_from_user)
1329
1330 #include "copy_template.S"
1331
1332-ENDPROC(__copy_from_user)
1333+ENDPROC(___copy_from_user)
1334
1335 .section .fixup,"ax"
1336 .align 0
1337diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1338index 6ee2f67..d1cce76 100644
1339--- a/arch/arm/lib/copy_page.S
1340+++ b/arch/arm/lib/copy_page.S
1341@@ -10,6 +10,7 @@
1342 * ASM optimised string functions
1343 */
1344 #include <linux/linkage.h>
1345+#include <linux/const.h>
1346 #include <asm/assembler.h>
1347 #include <asm/asm-offsets.h>
1348 #include <asm/cache.h>
1349diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1350index 1a71e15..ac7b258 100644
1351--- a/arch/arm/lib/copy_to_user.S
1352+++ b/arch/arm/lib/copy_to_user.S
1353@@ -16,7 +16,7 @@
1354 /*
1355 * Prototype:
1356 *
1357- * size_t __copy_to_user(void *to, const void *from, size_t n)
1358+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1359 *
1360 * Purpose:
1361 *
1362@@ -88,11 +88,11 @@
1363 .text
1364
1365 ENTRY(__copy_to_user_std)
1366-WEAK(__copy_to_user)
1367+WEAK(___copy_to_user)
1368
1369 #include "copy_template.S"
1370
1371-ENDPROC(__copy_to_user)
1372+ENDPROC(___copy_to_user)
1373
1374 .section .fixup,"ax"
1375 .align 0
1376diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1377index ffdd274..91017b6 100644
1378--- a/arch/arm/lib/uaccess.S
1379+++ b/arch/arm/lib/uaccess.S
1380@@ -19,7 +19,7 @@
1381
1382 #define PAGE_SHIFT 12
1383
1384-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1385+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1386 * Purpose : copy a block to user memory from kernel memory
1387 * Params : to - user memory
1388 * : from - kernel memory
1389@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
1390 sub r2, r2, ip
1391 b .Lc2u_dest_aligned
1392
1393-ENTRY(__copy_to_user)
1394+ENTRY(___copy_to_user)
1395 stmfd sp!, {r2, r4 - r7, lr}
1396 cmp r2, #4
1397 blt .Lc2u_not_enough
1398@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
1399 ldrgtb r3, [r1], #0
1400 USER( strgtbt r3, [r0], #1) @ May fault
1401 b .Lc2u_finished
1402-ENDPROC(__copy_to_user)
1403+ENDPROC(___copy_to_user)
1404
1405 .section .fixup,"ax"
1406 .align 0
1407 9001: ldmfd sp!, {r0, r4 - r7, pc}
1408 .previous
1409
1410-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1411+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1412 * Purpose : copy a block from user memory to kernel memory
1413 * Params : to - kernel memory
1414 * : from - user memory
1415@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
1416 sub r2, r2, ip
1417 b .Lcfu_dest_aligned
1418
1419-ENTRY(__copy_from_user)
1420+ENTRY(___copy_from_user)
1421 stmfd sp!, {r0, r2, r4 - r7, lr}
1422 cmp r2, #4
1423 blt .Lcfu_not_enough
1424@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
1425 USER( ldrgtbt r3, [r1], #1) @ May fault
1426 strgtb r3, [r0], #1
1427 b .Lcfu_finished
1428-ENDPROC(__copy_from_user)
1429+ENDPROC(___copy_from_user)
1430
1431 .section .fixup,"ax"
1432 .align 0
1433diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1434index 6b967ff..67d5b2b 100644
1435--- a/arch/arm/lib/uaccess_with_memcpy.c
1436+++ b/arch/arm/lib/uaccess_with_memcpy.c
1437@@ -97,7 +97,7 @@ out:
1438 }
1439
1440 unsigned long
1441-__copy_to_user(void __user *to, const void *from, unsigned long n)
1442+___copy_to_user(void __user *to, const void *from, unsigned long n)
1443 {
1444 /*
1445 * This test is stubbed out of the main function above to keep
1446diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
1447index 4028724..beec230 100644
1448--- a/arch/arm/mach-at91/pm.c
1449+++ b/arch/arm/mach-at91/pm.c
1450@@ -348,7 +348,7 @@ static void at91_pm_end(void)
1451 }
1452
1453
1454-static struct platform_suspend_ops at91_pm_ops ={
1455+static const struct platform_suspend_ops at91_pm_ops ={
1456 .valid = at91_pm_valid_state,
1457 .begin = at91_pm_begin,
1458 .enter = at91_pm_enter,
1459diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
1460index 5218943..0a34552 100644
1461--- a/arch/arm/mach-omap1/pm.c
1462+++ b/arch/arm/mach-omap1/pm.c
1463@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
1464
1465
1466
1467-static struct platform_suspend_ops omap_pm_ops ={
1468+static const struct platform_suspend_ops omap_pm_ops ={
1469 .prepare = omap_pm_prepare,
1470 .enter = omap_pm_enter,
1471 .finish = omap_pm_finish,
1472diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1473index bff5c4e..d4c649b 100644
1474--- a/arch/arm/mach-omap2/pm24xx.c
1475+++ b/arch/arm/mach-omap2/pm24xx.c
1476@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1477 enable_hlt();
1478 }
1479
1480-static struct platform_suspend_ops omap_pm_ops = {
1481+static const struct platform_suspend_ops omap_pm_ops = {
1482 .prepare = omap2_pm_prepare,
1483 .enter = omap2_pm_enter,
1484 .finish = omap2_pm_finish,
1485diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1486index 8946319..7d3e661 100644
1487--- a/arch/arm/mach-omap2/pm34xx.c
1488+++ b/arch/arm/mach-omap2/pm34xx.c
1489@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1490 return;
1491 }
1492
1493-static struct platform_suspend_ops omap_pm_ops = {
1494+static const struct platform_suspend_ops omap_pm_ops = {
1495 .begin = omap3_pm_begin,
1496 .end = omap3_pm_end,
1497 .prepare = omap3_pm_prepare,
1498diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1499index b3d8d53..6e68ebc 100644
1500--- a/arch/arm/mach-pnx4008/pm.c
1501+++ b/arch/arm/mach-pnx4008/pm.c
1502@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1503 (state == PM_SUSPEND_MEM);
1504 }
1505
1506-static struct platform_suspend_ops pnx4008_pm_ops = {
1507+static const struct platform_suspend_ops pnx4008_pm_ops = {
1508 .enter = pnx4008_pm_enter,
1509 .valid = pnx4008_pm_valid,
1510 };
1511diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1512index 7693355..9beb00a 100644
1513--- a/arch/arm/mach-pxa/pm.c
1514+++ b/arch/arm/mach-pxa/pm.c
1515@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1516 pxa_cpu_pm_fns->finish();
1517 }
1518
1519-static struct platform_suspend_ops pxa_pm_ops = {
1520+static const struct platform_suspend_ops pxa_pm_ops = {
1521 .valid = pxa_pm_valid,
1522 .enter = pxa_pm_enter,
1523 .prepare = pxa_pm_prepare,
1524diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1525index 629e05d..06be589 100644
1526--- a/arch/arm/mach-pxa/sharpsl_pm.c
1527+++ b/arch/arm/mach-pxa/sharpsl_pm.c
1528@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1529 }
1530
1531 #ifdef CONFIG_PM
1532-static struct platform_suspend_ops sharpsl_pm_ops = {
1533+static const struct platform_suspend_ops sharpsl_pm_ops = {
1534 .prepare = pxa_pm_prepare,
1535 .finish = pxa_pm_finish,
1536 .enter = corgi_pxa_pm_enter,
1537diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1538index c83fdc8..ab9fc44 100644
1539--- a/arch/arm/mach-sa1100/pm.c
1540+++ b/arch/arm/mach-sa1100/pm.c
1541@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1542 return virt_to_phys(sp);
1543 }
1544
1545-static struct platform_suspend_ops sa11x0_pm_ops = {
1546+static const struct platform_suspend_ops sa11x0_pm_ops = {
1547 .enter = sa11x0_pm_enter,
1548 .valid = suspend_valid_only_mem,
1549 };
1550diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1551index 3191cd6..c322981 100644
1552--- a/arch/arm/mm/fault.c
1553+++ b/arch/arm/mm/fault.c
1554@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1555 }
1556 #endif
1557
1558+#ifdef CONFIG_PAX_PAGEEXEC
1559+ if (fsr & FSR_LNX_PF) {
1560+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1561+ do_group_exit(SIGKILL);
1562+ }
1563+#endif
1564+
1565 tsk->thread.address = addr;
1566 tsk->thread.error_code = fsr;
1567 tsk->thread.trap_no = 14;
1568@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1569 }
1570 #endif /* CONFIG_MMU */
1571
1572+#ifdef CONFIG_PAX_PAGEEXEC
1573+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1574+{
1575+ long i;
1576+
1577+ printk(KERN_ERR "PAX: bytes at PC: ");
1578+ for (i = 0; i < 20; i++) {
1579+ unsigned char c;
1580+ if (get_user(c, (__force unsigned char __user *)pc+i))
1581+ printk(KERN_CONT "?? ");
1582+ else
1583+ printk(KERN_CONT "%02x ", c);
1584+ }
1585+ printk("\n");
1586+
1587+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1588+ for (i = -1; i < 20; i++) {
1589+ unsigned long c;
1590+ if (get_user(c, (__force unsigned long __user *)sp+i))
1591+ printk(KERN_CONT "???????? ");
1592+ else
1593+ printk(KERN_CONT "%08lx ", c);
1594+ }
1595+ printk("\n");
1596+}
1597+#endif
1598+
1599 /*
1600 * First Level Translation Fault Handler
1601 *
1602@@ -569,6 +603,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1603 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1604 struct siginfo info;
1605
1606+#ifdef CONFIG_PAX_REFCOUNT
1607+ if (fsr_fs(ifsr) == 2) {
1608+ unsigned int bkpt;
1609+
1610+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1611+ current->thread.error_code = ifsr;
1612+ current->thread.trap_no = 0;
1613+ pax_report_refcount_overflow(regs);
1614+ fixup_exception(regs);
1615+ return;
1616+ }
1617+ }
1618+#endif
1619+
1620 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1621 return;
1622
1623diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1624index f5abc51..7ec524c 100644
1625--- a/arch/arm/mm/mmap.c
1626+++ b/arch/arm/mm/mmap.c
1627@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1628 if (len > TASK_SIZE)
1629 return -ENOMEM;
1630
1631+#ifdef CONFIG_PAX_RANDMMAP
1632+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1633+#endif
1634+
1635 if (addr) {
1636 if (do_align)
1637 addr = COLOUR_ALIGN(addr, pgoff);
1638@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1639 addr = PAGE_ALIGN(addr);
1640
1641 vma = find_vma(mm, addr);
1642- if (TASK_SIZE - len >= addr &&
1643- (!vma || addr + len <= vma->vm_start))
1644+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1645 return addr;
1646 }
1647 if (len > mm->cached_hole_size) {
1648- start_addr = addr = mm->free_area_cache;
1649+ start_addr = addr = mm->free_area_cache;
1650 } else {
1651- start_addr = addr = TASK_UNMAPPED_BASE;
1652- mm->cached_hole_size = 0;
1653+ start_addr = addr = mm->mmap_base;
1654+ mm->cached_hole_size = 0;
1655 }
1656
1657 full_search:
1658@@ -94,14 +97,14 @@ full_search:
1659 * Start a new search - just in case we missed
1660 * some holes.
1661 */
1662- if (start_addr != TASK_UNMAPPED_BASE) {
1663- start_addr = addr = TASK_UNMAPPED_BASE;
1664+ if (start_addr != mm->mmap_base) {
1665+ start_addr = addr = mm->mmap_base;
1666 mm->cached_hole_size = 0;
1667 goto full_search;
1668 }
1669 return -ENOMEM;
1670 }
1671- if (!vma || addr + len <= vma->vm_start) {
1672+ if (check_heap_stack_gap(vma, addr, len)) {
1673 /*
1674 * Remember the place where we stopped the search:
1675 */
1676diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1677index 8d97db2..b66cfa5 100644
1678--- a/arch/arm/plat-s3c/pm.c
1679+++ b/arch/arm/plat-s3c/pm.c
1680@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1681 s3c_pm_check_cleanup();
1682 }
1683
1684-static struct platform_suspend_ops s3c_pm_ops = {
1685+static const struct platform_suspend_ops s3c_pm_ops = {
1686 .enter = s3c_pm_enter,
1687 .prepare = s3c_pm_prepare,
1688 .finish = s3c_pm_finish,
1689diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1690index d3cf35a..0ba6053 100644
1691--- a/arch/avr32/include/asm/cache.h
1692+++ b/arch/avr32/include/asm/cache.h
1693@@ -1,8 +1,10 @@
1694 #ifndef __ASM_AVR32_CACHE_H
1695 #define __ASM_AVR32_CACHE_H
1696
1697+#include <linux/const.h>
1698+
1699 #define L1_CACHE_SHIFT 5
1700-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1701+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1702
1703 /*
1704 * Memory returned by kmalloc() may be used for DMA, so we must make
1705diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1706index d5d1d41..856e2ed 100644
1707--- a/arch/avr32/include/asm/elf.h
1708+++ b/arch/avr32/include/asm/elf.h
1709@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1710 the loader. We need to make sure that it is out of the way of the program
1711 that it will "exec", and that there is sufficient room for the brk. */
1712
1713-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1714+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1715
1716+#ifdef CONFIG_PAX_ASLR
1717+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1718+
1719+#define PAX_DELTA_MMAP_LEN 15
1720+#define PAX_DELTA_STACK_LEN 15
1721+#endif
1722
1723 /* This yields a mask that user programs can use to figure out what
1724 instruction set this CPU supports. This could be done in user space,
1725diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1726index b7f5c68..556135c 100644
1727--- a/arch/avr32/include/asm/kmap_types.h
1728+++ b/arch/avr32/include/asm/kmap_types.h
1729@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1730 D(11) KM_IRQ1,
1731 D(12) KM_SOFTIRQ0,
1732 D(13) KM_SOFTIRQ1,
1733-D(14) KM_TYPE_NR
1734+D(14) KM_CLEARPAGE,
1735+D(15) KM_TYPE_NR
1736 };
1737
1738 #undef D
1739diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1740index f021edf..32d680e 100644
1741--- a/arch/avr32/mach-at32ap/pm.c
1742+++ b/arch/avr32/mach-at32ap/pm.c
1743@@ -176,7 +176,7 @@ out:
1744 return 0;
1745 }
1746
1747-static struct platform_suspend_ops avr32_pm_ops = {
1748+static const struct platform_suspend_ops avr32_pm_ops = {
1749 .valid = avr32_pm_valid_state,
1750 .enter = avr32_pm_enter,
1751 };
1752diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1753index b61d86d..e292c7f 100644
1754--- a/arch/avr32/mm/fault.c
1755+++ b/arch/avr32/mm/fault.c
1756@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1757
1758 int exception_trace = 1;
1759
1760+#ifdef CONFIG_PAX_PAGEEXEC
1761+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1762+{
1763+ unsigned long i;
1764+
1765+ printk(KERN_ERR "PAX: bytes at PC: ");
1766+ for (i = 0; i < 20; i++) {
1767+ unsigned char c;
1768+ if (get_user(c, (unsigned char *)pc+i))
1769+ printk(KERN_CONT "???????? ");
1770+ else
1771+ printk(KERN_CONT "%02x ", c);
1772+ }
1773+ printk("\n");
1774+}
1775+#endif
1776+
1777 /*
1778 * This routine handles page faults. It determines the address and the
1779 * problem, and then passes it off to one of the appropriate routines.
1780@@ -157,6 +174,16 @@ bad_area:
1781 up_read(&mm->mmap_sem);
1782
1783 if (user_mode(regs)) {
1784+
1785+#ifdef CONFIG_PAX_PAGEEXEC
1786+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1787+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1788+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1789+ do_group_exit(SIGKILL);
1790+ }
1791+ }
1792+#endif
1793+
1794 if (exception_trace && printk_ratelimit())
1795 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1796 "sp %08lx ecr %lu\n",
1797diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1798index 93f6c63..d144953 100644
1799--- a/arch/blackfin/include/asm/cache.h
1800+++ b/arch/blackfin/include/asm/cache.h
1801@@ -7,12 +7,14 @@
1802 #ifndef __ARCH_BLACKFIN_CACHE_H
1803 #define __ARCH_BLACKFIN_CACHE_H
1804
1805+#include <linux/const.h>
1806+
1807 /*
1808 * Bytes per L1 cache line
1809 * Blackfin loads 32 bytes for cache
1810 */
1811 #define L1_CACHE_SHIFT 5
1812-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1813+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1814 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1815
1816 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1817diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1818index cce79d0..c406c85 100644
1819--- a/arch/blackfin/kernel/kgdb.c
1820+++ b/arch/blackfin/kernel/kgdb.c
1821@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1822 return -1; /* this means that we do not want to exit from the handler */
1823 }
1824
1825-struct kgdb_arch arch_kgdb_ops = {
1826+const struct kgdb_arch arch_kgdb_ops = {
1827 .gdb_bpt_instr = {0xa1},
1828 #ifdef CONFIG_SMP
1829 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1830diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1831index 8837be4..b2fb413 100644
1832--- a/arch/blackfin/mach-common/pm.c
1833+++ b/arch/blackfin/mach-common/pm.c
1834@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1835 return 0;
1836 }
1837
1838-struct platform_suspend_ops bfin_pm_ops = {
1839+const struct platform_suspend_ops bfin_pm_ops = {
1840 .enter = bfin_pm_enter,
1841 .valid = bfin_pm_valid,
1842 };
1843diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1844index aea2718..3639a60 100644
1845--- a/arch/cris/include/arch-v10/arch/cache.h
1846+++ b/arch/cris/include/arch-v10/arch/cache.h
1847@@ -1,8 +1,9 @@
1848 #ifndef _ASM_ARCH_CACHE_H
1849 #define _ASM_ARCH_CACHE_H
1850
1851+#include <linux/const.h>
1852 /* Etrax 100LX have 32-byte cache-lines. */
1853-#define L1_CACHE_BYTES 32
1854 #define L1_CACHE_SHIFT 5
1855+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1856
1857 #endif /* _ASM_ARCH_CACHE_H */
1858diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1859index dfc7305..417f5b3 100644
1860--- a/arch/cris/include/arch-v32/arch/cache.h
1861+++ b/arch/cris/include/arch-v32/arch/cache.h
1862@@ -1,11 +1,12 @@
1863 #ifndef _ASM_CRIS_ARCH_CACHE_H
1864 #define _ASM_CRIS_ARCH_CACHE_H
1865
1866+#include <linux/const.h>
1867 #include <arch/hwregs/dma.h>
1868
1869 /* A cache-line is 32 bytes. */
1870-#define L1_CACHE_BYTES 32
1871 #define L1_CACHE_SHIFT 5
1872+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1873
1874 void flush_dma_list(dma_descr_data *descr);
1875 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
1876diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1877index 00a57af..c3ef0cd 100644
1878--- a/arch/frv/include/asm/atomic.h
1879+++ b/arch/frv/include/asm/atomic.h
1880@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1881 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1882 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1883
1884+#define atomic64_read_unchecked(v) atomic64_read(v)
1885+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1886+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1887+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1888+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1889+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1890+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1891+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1892+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1893+
1894 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1895 {
1896 int c, old;
1897diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
1898index 7dc0f0f..1e6a620 100644
1899--- a/arch/frv/include/asm/cache.h
1900+++ b/arch/frv/include/asm/cache.h
1901@@ -12,10 +12,11 @@
1902 #ifndef __ASM_CACHE_H
1903 #define __ASM_CACHE_H
1904
1905+#include <linux/const.h>
1906
1907 /* bytes per L1 cache line */
1908 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
1909-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1910+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1911
1912 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1913
1914diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1915index f8e16b2..c73ff79 100644
1916--- a/arch/frv/include/asm/kmap_types.h
1917+++ b/arch/frv/include/asm/kmap_types.h
1918@@ -23,6 +23,7 @@ enum km_type {
1919 KM_IRQ1,
1920 KM_SOFTIRQ0,
1921 KM_SOFTIRQ1,
1922+ KM_CLEARPAGE,
1923 KM_TYPE_NR
1924 };
1925
1926diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1927index 385fd30..6c3d97e 100644
1928--- a/arch/frv/mm/elf-fdpic.c
1929+++ b/arch/frv/mm/elf-fdpic.c
1930@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1931 if (addr) {
1932 addr = PAGE_ALIGN(addr);
1933 vma = find_vma(current->mm, addr);
1934- if (TASK_SIZE - len >= addr &&
1935- (!vma || addr + len <= vma->vm_start))
1936+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1937 goto success;
1938 }
1939
1940@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1941 for (; vma; vma = vma->vm_next) {
1942 if (addr > limit)
1943 break;
1944- if (addr + len <= vma->vm_start)
1945+ if (check_heap_stack_gap(vma, addr, len))
1946 goto success;
1947 addr = vma->vm_end;
1948 }
1949@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1950 for (; vma; vma = vma->vm_next) {
1951 if (addr > limit)
1952 break;
1953- if (addr + len <= vma->vm_start)
1954+ if (check_heap_stack_gap(vma, addr, len))
1955 goto success;
1956 addr = vma->vm_end;
1957 }
1958diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
1959index c635028..6d9445a 100644
1960--- a/arch/h8300/include/asm/cache.h
1961+++ b/arch/h8300/include/asm/cache.h
1962@@ -1,8 +1,10 @@
1963 #ifndef __ARCH_H8300_CACHE_H
1964 #define __ARCH_H8300_CACHE_H
1965
1966+#include <linux/const.h>
1967+
1968 /* bytes per L1 cache line */
1969-#define L1_CACHE_BYTES 4
1970+#define L1_CACHE_BYTES _AC(4,UL)
1971
1972 /* m68k-elf-gcc 2.95.2 doesn't like these */
1973
1974diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1975index e4a80d8..11a7ea1 100644
1976--- a/arch/ia64/hp/common/hwsw_iommu.c
1977+++ b/arch/ia64/hp/common/hwsw_iommu.c
1978@@ -17,7 +17,7 @@
1979 #include <linux/swiotlb.h>
1980 #include <asm/machvec.h>
1981
1982-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1983+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1984
1985 /* swiotlb declarations & definitions: */
1986 extern int swiotlb_late_init_with_default_size (size_t size);
1987@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1988 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1989 }
1990
1991-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1992+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1993 {
1994 if (use_swiotlb(dev))
1995 return &swiotlb_dma_ops;
1996diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1997index 01ae69b..35752fd 100644
1998--- a/arch/ia64/hp/common/sba_iommu.c
1999+++ b/arch/ia64/hp/common/sba_iommu.c
2000@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
2001 },
2002 };
2003
2004-extern struct dma_map_ops swiotlb_dma_ops;
2005+extern const struct dma_map_ops swiotlb_dma_ops;
2006
2007 static int __init
2008 sba_init(void)
2009@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
2010
2011 __setup("sbapagesize=",sba_page_override);
2012
2013-struct dma_map_ops sba_dma_ops = {
2014+const struct dma_map_ops sba_dma_ops = {
2015 .alloc_coherent = sba_alloc_coherent,
2016 .free_coherent = sba_free_coherent,
2017 .map_page = sba_map_page,
2018diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
2019index c69552b..c7122f4 100644
2020--- a/arch/ia64/ia32/binfmt_elf32.c
2021+++ b/arch/ia64/ia32/binfmt_elf32.c
2022@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
2023
2024 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
2025
2026+#ifdef CONFIG_PAX_ASLR
2027+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2028+
2029+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2030+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2031+#endif
2032+
2033 /* Ugly but avoids duplication */
2034 #include "../../../fs/binfmt_elf.c"
2035
2036diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
2037index 0f15349..26b3429 100644
2038--- a/arch/ia64/ia32/ia32priv.h
2039+++ b/arch/ia64/ia32/ia32priv.h
2040@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
2041 #define ELF_DATA ELFDATA2LSB
2042 #define ELF_ARCH EM_386
2043
2044-#define IA32_STACK_TOP IA32_PAGE_OFFSET
2045+#ifdef CONFIG_PAX_RANDUSTACK
2046+#define __IA32_DELTA_STACK (current->mm->delta_stack)
2047+#else
2048+#define __IA32_DELTA_STACK 0UL
2049+#endif
2050+
2051+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
2052+
2053 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
2054 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
2055
2056diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2057index 88405cb..de5ca5d 100644
2058--- a/arch/ia64/include/asm/atomic.h
2059+++ b/arch/ia64/include/asm/atomic.h
2060@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2061 #define atomic64_inc(v) atomic64_add(1, (v))
2062 #define atomic64_dec(v) atomic64_sub(1, (v))
2063
2064+#define atomic64_read_unchecked(v) atomic64_read(v)
2065+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2066+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2067+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2068+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2069+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2070+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2071+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2072+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2073+
2074 /* Atomic operations are already serializing */
2075 #define smp_mb__before_atomic_dec() barrier()
2076 #define smp_mb__after_atomic_dec() barrier()
2077diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2078index e7482bd..d1c9b8e 100644
2079--- a/arch/ia64/include/asm/cache.h
2080+++ b/arch/ia64/include/asm/cache.h
2081@@ -1,6 +1,7 @@
2082 #ifndef _ASM_IA64_CACHE_H
2083 #define _ASM_IA64_CACHE_H
2084
2085+#include <linux/const.h>
2086
2087 /*
2088 * Copyright (C) 1998-2000 Hewlett-Packard Co
2089@@ -9,7 +10,7 @@
2090
2091 /* Bytes per L1 (data) cache line. */
2092 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2093-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2094+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2095
2096 #ifdef CONFIG_SMP
2097 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2098diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
2099index 8d3c79c..71b3af6 100644
2100--- a/arch/ia64/include/asm/dma-mapping.h
2101+++ b/arch/ia64/include/asm/dma-mapping.h
2102@@ -12,7 +12,7 @@
2103
2104 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
2105
2106-extern struct dma_map_ops *dma_ops;
2107+extern const struct dma_map_ops *dma_ops;
2108 extern struct ia64_machine_vector ia64_mv;
2109 extern void set_iommu_machvec(void);
2110
2111@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
2112 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2113 dma_addr_t *daddr, gfp_t gfp)
2114 {
2115- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2116+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2117 void *caddr;
2118
2119 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
2120@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2121 static inline void dma_free_coherent(struct device *dev, size_t size,
2122 void *caddr, dma_addr_t daddr)
2123 {
2124- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2125+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2126 debug_dma_free_coherent(dev, size, caddr, daddr);
2127 ops->free_coherent(dev, size, caddr, daddr);
2128 }
2129@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2130
2131 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
2132 {
2133- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2134+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2135 return ops->mapping_error(dev, daddr);
2136 }
2137
2138 static inline int dma_supported(struct device *dev, u64 mask)
2139 {
2140- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2141+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2142 return ops->dma_supported(dev, mask);
2143 }
2144
2145diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2146index 86eddee..b116bb4 100644
2147--- a/arch/ia64/include/asm/elf.h
2148+++ b/arch/ia64/include/asm/elf.h
2149@@ -43,6 +43,13 @@
2150 */
2151 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2152
2153+#ifdef CONFIG_PAX_ASLR
2154+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2155+
2156+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2157+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2158+#endif
2159+
2160 #define PT_IA_64_UNWIND 0x70000001
2161
2162 /* IA-64 relocations: */
2163diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
2164index 367d299..9ad4279 100644
2165--- a/arch/ia64/include/asm/machvec.h
2166+++ b/arch/ia64/include/asm/machvec.h
2167@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
2168 /* DMA-mapping interface: */
2169 typedef void ia64_mv_dma_init (void);
2170 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
2171-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2172+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2173
2174 /*
2175 * WARNING: The legacy I/O space is _architected_. Platforms are
2176@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
2177 # endif /* CONFIG_IA64_GENERIC */
2178
2179 extern void swiotlb_dma_init(void);
2180-extern struct dma_map_ops *dma_get_ops(struct device *);
2181+extern const struct dma_map_ops *dma_get_ops(struct device *);
2182
2183 /*
2184 * Define default versions so we can extend machvec for new platforms without having
2185diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2186index 8840a69..cdb63d9 100644
2187--- a/arch/ia64/include/asm/pgtable.h
2188+++ b/arch/ia64/include/asm/pgtable.h
2189@@ -12,7 +12,7 @@
2190 * David Mosberger-Tang <davidm@hpl.hp.com>
2191 */
2192
2193-
2194+#include <linux/const.h>
2195 #include <asm/mman.h>
2196 #include <asm/page.h>
2197 #include <asm/processor.h>
2198@@ -143,6 +143,17 @@
2199 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2200 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2201 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2202+
2203+#ifdef CONFIG_PAX_PAGEEXEC
2204+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2205+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2206+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2207+#else
2208+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2209+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2210+# define PAGE_COPY_NOEXEC PAGE_COPY
2211+#endif
2212+
2213 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2214 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2215 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2216diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2217index 239ecdc..f94170e 100644
2218--- a/arch/ia64/include/asm/spinlock.h
2219+++ b/arch/ia64/include/asm/spinlock.h
2220@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
2221 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2222
2223 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2224- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2225+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2226 }
2227
2228 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
2229diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2230index 449c8c0..432a3d2 100644
2231--- a/arch/ia64/include/asm/uaccess.h
2232+++ b/arch/ia64/include/asm/uaccess.h
2233@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2234 const void *__cu_from = (from); \
2235 long __cu_len = (n); \
2236 \
2237- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2238+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2239 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2240 __cu_len; \
2241 })
2242@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2243 long __cu_len = (n); \
2244 \
2245 __chk_user_ptr(__cu_from); \
2246- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2247+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2249 __cu_len; \
2250 })
2251diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
2252index f2c1600..969398a 100644
2253--- a/arch/ia64/kernel/dma-mapping.c
2254+++ b/arch/ia64/kernel/dma-mapping.c
2255@@ -3,7 +3,7 @@
2256 /* Set this to 1 if there is a HW IOMMU in the system */
2257 int iommu_detected __read_mostly;
2258
2259-struct dma_map_ops *dma_ops;
2260+const struct dma_map_ops *dma_ops;
2261 EXPORT_SYMBOL(dma_ops);
2262
2263 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
2264@@ -16,7 +16,7 @@ static int __init dma_init(void)
2265 }
2266 fs_initcall(dma_init);
2267
2268-struct dma_map_ops *dma_get_ops(struct device *dev)
2269+const struct dma_map_ops *dma_get_ops(struct device *dev)
2270 {
2271 return dma_ops;
2272 }
2273diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2274index 1481b0a..e7d38ff 100644
2275--- a/arch/ia64/kernel/module.c
2276+++ b/arch/ia64/kernel/module.c
2277@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
2278 void
2279 module_free (struct module *mod, void *module_region)
2280 {
2281- if (mod && mod->arch.init_unw_table &&
2282- module_region == mod->module_init) {
2283+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2284 unw_remove_unwind_table(mod->arch.init_unw_table);
2285 mod->arch.init_unw_table = NULL;
2286 }
2287@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2288 }
2289
2290 static inline int
2291+in_init_rx (const struct module *mod, uint64_t addr)
2292+{
2293+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2294+}
2295+
2296+static inline int
2297+in_init_rw (const struct module *mod, uint64_t addr)
2298+{
2299+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2300+}
2301+
2302+static inline int
2303 in_init (const struct module *mod, uint64_t addr)
2304 {
2305- return addr - (uint64_t) mod->module_init < mod->init_size;
2306+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2307+}
2308+
2309+static inline int
2310+in_core_rx (const struct module *mod, uint64_t addr)
2311+{
2312+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2313+}
2314+
2315+static inline int
2316+in_core_rw (const struct module *mod, uint64_t addr)
2317+{
2318+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2319 }
2320
2321 static inline int
2322 in_core (const struct module *mod, uint64_t addr)
2323 {
2324- return addr - (uint64_t) mod->module_core < mod->core_size;
2325+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2326 }
2327
2328 static inline int
2329@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2330 break;
2331
2332 case RV_BDREL:
2333- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2334+ if (in_init_rx(mod, val))
2335+ val -= (uint64_t) mod->module_init_rx;
2336+ else if (in_init_rw(mod, val))
2337+ val -= (uint64_t) mod->module_init_rw;
2338+ else if (in_core_rx(mod, val))
2339+ val -= (uint64_t) mod->module_core_rx;
2340+ else if (in_core_rw(mod, val))
2341+ val -= (uint64_t) mod->module_core_rw;
2342 break;
2343
2344 case RV_LTV:
2345@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2346 * addresses have been selected...
2347 */
2348 uint64_t gp;
2349- if (mod->core_size > MAX_LTOFF)
2350+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2351 /*
2352 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2353 * at the end of the module.
2354 */
2355- gp = mod->core_size - MAX_LTOFF / 2;
2356+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2357 else
2358- gp = mod->core_size / 2;
2359- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2360+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2361+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2362 mod->arch.gp = gp;
2363 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2364 }
2365diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
2366index f6b1ff0..de773fb 100644
2367--- a/arch/ia64/kernel/pci-dma.c
2368+++ b/arch/ia64/kernel/pci-dma.c
2369@@ -43,7 +43,7 @@ struct device fallback_dev = {
2370 .dma_mask = &fallback_dev.coherent_dma_mask,
2371 };
2372
2373-extern struct dma_map_ops intel_dma_ops;
2374+extern const struct dma_map_ops intel_dma_ops;
2375
2376 static int __init pci_iommu_init(void)
2377 {
2378@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
2379 }
2380 EXPORT_SYMBOL(iommu_dma_supported);
2381
2382+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
2383+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
2384+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2385+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2386+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2387+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2388+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
2389+
2390+static const struct dma_map_ops intel_iommu_dma_ops = {
2391+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
2392+ .alloc_coherent = intel_alloc_coherent,
2393+ .free_coherent = intel_free_coherent,
2394+ .map_sg = intel_map_sg,
2395+ .unmap_sg = intel_unmap_sg,
2396+ .map_page = intel_map_page,
2397+ .unmap_page = intel_unmap_page,
2398+ .mapping_error = intel_mapping_error,
2399+
2400+ .sync_single_for_cpu = machvec_dma_sync_single,
2401+ .sync_sg_for_cpu = machvec_dma_sync_sg,
2402+ .sync_single_for_device = machvec_dma_sync_single,
2403+ .sync_sg_for_device = machvec_dma_sync_sg,
2404+ .dma_supported = iommu_dma_supported,
2405+};
2406+
2407 void __init pci_iommu_alloc(void)
2408 {
2409- dma_ops = &intel_dma_ops;
2410-
2411- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
2412- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
2413- dma_ops->sync_single_for_device = machvec_dma_sync_single;
2414- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
2415- dma_ops->dma_supported = iommu_dma_supported;
2416+ dma_ops = &intel_iommu_dma_ops;
2417
2418 /*
2419 * The order of these functions is important for
2420diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
2421index 285aae8..61dbab6 100644
2422--- a/arch/ia64/kernel/pci-swiotlb.c
2423+++ b/arch/ia64/kernel/pci-swiotlb.c
2424@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
2425 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
2426 }
2427
2428-struct dma_map_ops swiotlb_dma_ops = {
2429+const struct dma_map_ops swiotlb_dma_ops = {
2430 .alloc_coherent = ia64_swiotlb_alloc_coherent,
2431 .free_coherent = swiotlb_free_coherent,
2432 .map_page = swiotlb_map_page,
2433diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2434index 609d500..7dde2a8 100644
2435--- a/arch/ia64/kernel/sys_ia64.c
2436+++ b/arch/ia64/kernel/sys_ia64.c
2437@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2438 if (REGION_NUMBER(addr) == RGN_HPAGE)
2439 addr = 0;
2440 #endif
2441+
2442+#ifdef CONFIG_PAX_RANDMMAP
2443+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2444+ addr = mm->free_area_cache;
2445+ else
2446+#endif
2447+
2448 if (!addr)
2449 addr = mm->free_area_cache;
2450
2451@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2453 /* At this point: (!vma || addr < vma->vm_end). */
2454 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2455- if (start_addr != TASK_UNMAPPED_BASE) {
2456+ if (start_addr != mm->mmap_base) {
2457 /* Start a new search --- just in case we missed some holes. */
2458- addr = TASK_UNMAPPED_BASE;
2459+ addr = mm->mmap_base;
2460 goto full_search;
2461 }
2462 return -ENOMEM;
2463 }
2464- if (!vma || addr + len <= vma->vm_start) {
2465+ if (check_heap_stack_gap(vma, addr, len)) {
2466 /* Remember the address where we stopped this search: */
2467 mm->free_area_cache = addr + len;
2468 return addr;
2469diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
2470index 8f06035..b3a5818 100644
2471--- a/arch/ia64/kernel/topology.c
2472+++ b/arch/ia64/kernel/topology.c
2473@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
2474 return ret;
2475 }
2476
2477-static struct sysfs_ops cache_sysfs_ops = {
2478+static const struct sysfs_ops cache_sysfs_ops = {
2479 .show = cache_show
2480 };
2481
2482diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2483index 0a0c77b..8e55a81 100644
2484--- a/arch/ia64/kernel/vmlinux.lds.S
2485+++ b/arch/ia64/kernel/vmlinux.lds.S
2486@@ -190,7 +190,7 @@ SECTIONS
2487 /* Per-cpu data: */
2488 . = ALIGN(PERCPU_PAGE_SIZE);
2489 PERCPU_VADDR(PERCPU_ADDR, :percpu)
2490- __phys_per_cpu_start = __per_cpu_load;
2491+ __phys_per_cpu_start = per_cpu_load;
2492 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
2493 * into percpu page size
2494 */
2495diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2496index 19261a9..1611b7a 100644
2497--- a/arch/ia64/mm/fault.c
2498+++ b/arch/ia64/mm/fault.c
2499@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2500 return pte_present(pte);
2501 }
2502
2503+#ifdef CONFIG_PAX_PAGEEXEC
2504+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2505+{
2506+ unsigned long i;
2507+
2508+ printk(KERN_ERR "PAX: bytes at PC: ");
2509+ for (i = 0; i < 8; i++) {
2510+ unsigned int c;
2511+ if (get_user(c, (unsigned int *)pc+i))
2512+ printk(KERN_CONT "???????? ");
2513+ else
2514+ printk(KERN_CONT "%08x ", c);
2515+ }
2516+ printk("\n");
2517+}
2518+#endif
2519+
2520 void __kprobes
2521 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2522 {
2523@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2524 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2525 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2526
2527- if ((vma->vm_flags & mask) != mask)
2528+ if ((vma->vm_flags & mask) != mask) {
2529+
2530+#ifdef CONFIG_PAX_PAGEEXEC
2531+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2532+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2533+ goto bad_area;
2534+
2535+ up_read(&mm->mmap_sem);
2536+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2537+ do_group_exit(SIGKILL);
2538+ }
2539+#endif
2540+
2541 goto bad_area;
2542
2543+ }
2544+
2545 survive:
2546 /*
2547 * If for any reason at all we couldn't handle the fault, make
2548diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2549index b0f6157..a082bbc 100644
2550--- a/arch/ia64/mm/hugetlbpage.c
2551+++ b/arch/ia64/mm/hugetlbpage.c
2552@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2553 /* At this point: (!vmm || addr < vmm->vm_end). */
2554 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2555 return -ENOMEM;
2556- if (!vmm || (addr + len) <= vmm->vm_start)
2557+ if (check_heap_stack_gap(vmm, addr, len))
2558 return addr;
2559 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2560 }
2561diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2562index 1857766..05cc6a3 100644
2563--- a/arch/ia64/mm/init.c
2564+++ b/arch/ia64/mm/init.c
2565@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
2566 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2567 vma->vm_end = vma->vm_start + PAGE_SIZE;
2568 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2569+
2570+#ifdef CONFIG_PAX_PAGEEXEC
2571+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2572+ vma->vm_flags &= ~VM_EXEC;
2573+
2574+#ifdef CONFIG_PAX_MPROTECT
2575+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2576+ vma->vm_flags &= ~VM_MAYEXEC;
2577+#endif
2578+
2579+ }
2580+#endif
2581+
2582 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2583 down_write(&current->mm->mmap_sem);
2584 if (insert_vm_struct(current->mm, vma)) {
2585diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
2586index 98b6849..8046766 100644
2587--- a/arch/ia64/sn/pci/pci_dma.c
2588+++ b/arch/ia64/sn/pci/pci_dma.c
2589@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
2590 return ret;
2591 }
2592
2593-static struct dma_map_ops sn_dma_ops = {
2594+static const struct dma_map_ops sn_dma_ops = {
2595 .alloc_coherent = sn_dma_alloc_coherent,
2596 .free_coherent = sn_dma_free_coherent,
2597 .map_page = sn_dma_map_page,
2598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2599index 40b3ee9..8c2c112 100644
2600--- a/arch/m32r/include/asm/cache.h
2601+++ b/arch/m32r/include/asm/cache.h
2602@@ -1,8 +1,10 @@
2603 #ifndef _ASM_M32R_CACHE_H
2604 #define _ASM_M32R_CACHE_H
2605
2606+#include <linux/const.h>
2607+
2608 /* L1 cache line size */
2609 #define L1_CACHE_SHIFT 4
2610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2612
2613 #endif /* _ASM_M32R_CACHE_H */
2614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2615index 82abd15..d95ae5d 100644
2616--- a/arch/m32r/lib/usercopy.c
2617+++ b/arch/m32r/lib/usercopy.c
2618@@ -14,6 +14,9 @@
2619 unsigned long
2620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2621 {
2622+ if ((long)n < 0)
2623+ return n;
2624+
2625 prefetch(from);
2626 if (access_ok(VERIFY_WRITE, to, n))
2627 __copy_user(to,from,n);
2628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2629 unsigned long
2630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2631 {
2632+ if ((long)n < 0)
2633+ return n;
2634+
2635 prefetchw(to);
2636 if (access_ok(VERIFY_READ, from, n))
2637 __copy_user_zeroing(to,from,n);
2638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2639index ecafbe1..432c3e4 100644
2640--- a/arch/m68k/include/asm/cache.h
2641+++ b/arch/m68k/include/asm/cache.h
2642@@ -4,9 +4,11 @@
2643 #ifndef __ARCH_M68K_CACHE_H
2644 #define __ARCH_M68K_CACHE_H
2645
2646+#include <linux/const.h>
2647+
2648 /* bytes per L1 cache line */
2649 #define L1_CACHE_SHIFT 4
2650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2652
2653 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
2654
2655diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2656index c209c47..2ba96e2 100644
2657--- a/arch/microblaze/include/asm/cache.h
2658+++ b/arch/microblaze/include/asm/cache.h
2659@@ -13,11 +13,12 @@
2660 #ifndef _ASM_MICROBLAZE_CACHE_H
2661 #define _ASM_MICROBLAZE_CACHE_H
2662
2663+#include <linux/const.h>
2664 #include <asm/registers.h>
2665
2666 #define L1_CACHE_SHIFT 2
2667 /* word-granular cache in microblaze */
2668-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2669+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2670
2671 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2672
2673diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2674index fd7620f..63d73a6 100644
2675--- a/arch/mips/Kconfig
2676+++ b/arch/mips/Kconfig
2677@@ -5,6 +5,7 @@ config MIPS
2678 select HAVE_IDE
2679 select HAVE_OPROFILE
2680 select HAVE_ARCH_KGDB
2681+ select GENERIC_ATOMIC64 if !64BIT
2682 # Horrible source of confusion. Die, die, die ...
2683 select EMBEDDED
2684 select RTC_LIB if !LEMOTE_FULOONG2E
2685diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2686index 77f5021..2b1db8a 100644
2687--- a/arch/mips/Makefile
2688+++ b/arch/mips/Makefile
2689@@ -51,6 +51,8 @@ endif
2690 cflags-y := -ffunction-sections
2691 cflags-y += $(call cc-option, -mno-check-zero-division)
2692
2693+cflags-y += -Wno-sign-compare -Wno-extra
2694+
2695 ifdef CONFIG_32BIT
2696 ld-emul = $(32bit-emul)
2697 vmlinux-32 = vmlinux
2698diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2699index 632f986..fd0378d 100644
2700--- a/arch/mips/alchemy/devboards/pm.c
2701+++ b/arch/mips/alchemy/devboards/pm.c
2702@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2703
2704 }
2705
2706-static struct platform_suspend_ops db1x_pm_ops = {
2707+static const struct platform_suspend_ops db1x_pm_ops = {
2708 .valid = suspend_valid_only_mem,
2709 .begin = db1x_pm_begin,
2710 .enter = db1x_pm_enter,
2711diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2712index 09e7128..111035b 100644
2713--- a/arch/mips/include/asm/atomic.h
2714+++ b/arch/mips/include/asm/atomic.h
2715@@ -21,6 +21,10 @@
2716 #include <asm/war.h>
2717 #include <asm/system.h>
2718
2719+#ifdef CONFIG_GENERIC_ATOMIC64
2720+#include <asm-generic/atomic64.h>
2721+#endif
2722+
2723 #define ATOMIC_INIT(i) { (i) }
2724
2725 /*
2726@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2727 */
2728 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2729
2730+#define atomic64_read_unchecked(v) atomic64_read(v)
2731+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2732+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2733+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2734+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2735+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2736+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2737+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2738+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2739+
2740 #endif /* CONFIG_64BIT */
2741
2742 /*
2743diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2744index 37f175c..c7a3065 100644
2745--- a/arch/mips/include/asm/cache.h
2746+++ b/arch/mips/include/asm/cache.h
2747@@ -9,10 +9,11 @@
2748 #ifndef _ASM_CACHE_H
2749 #define _ASM_CACHE_H
2750
2751+#include <linux/const.h>
2752 #include <kmalloc.h>
2753
2754 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2755-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2756+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2757
2758 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2759 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2760diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2761index 7990694..4e93acf 100644
2762--- a/arch/mips/include/asm/elf.h
2763+++ b/arch/mips/include/asm/elf.h
2764@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2765 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2766 #endif
2767
2768+#ifdef CONFIG_PAX_ASLR
2769+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2770+
2771+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2772+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2773+#endif
2774+
2775 #endif /* _ASM_ELF_H */
2776diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2777index f266295..627cfff 100644
2778--- a/arch/mips/include/asm/page.h
2779+++ b/arch/mips/include/asm/page.h
2780@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2781 #ifdef CONFIG_CPU_MIPS32
2782 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2783 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2784- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2785+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2786 #else
2787 typedef struct { unsigned long long pte; } pte_t;
2788 #define pte_val(x) ((x).pte)
2789diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2790index e48c0bf..f3acf65 100644
2791--- a/arch/mips/include/asm/reboot.h
2792+++ b/arch/mips/include/asm/reboot.h
2793@@ -9,7 +9,7 @@
2794 #ifndef _ASM_REBOOT_H
2795 #define _ASM_REBOOT_H
2796
2797-extern void (*_machine_restart)(char *command);
2798-extern void (*_machine_halt)(void);
2799+extern void (*__noreturn _machine_restart)(char *command);
2800+extern void (*__noreturn _machine_halt)(void);
2801
2802 #endif /* _ASM_REBOOT_H */
2803diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2804index 83b5509..9fa24a23 100644
2805--- a/arch/mips/include/asm/system.h
2806+++ b/arch/mips/include/asm/system.h
2807@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2808 */
2809 #define __ARCH_WANT_UNLOCKED_CTXSW
2810
2811-extern unsigned long arch_align_stack(unsigned long sp);
2812+#define arch_align_stack(x) ((x) & ~0xfUL)
2813
2814 #endif /* _ASM_SYSTEM_H */
2815diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2816index 9fdd8bc..fcf9d68 100644
2817--- a/arch/mips/kernel/binfmt_elfn32.c
2818+++ b/arch/mips/kernel/binfmt_elfn32.c
2819@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2820 #undef ELF_ET_DYN_BASE
2821 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2822
2823+#ifdef CONFIG_PAX_ASLR
2824+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2825+
2826+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2827+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2828+#endif
2829+
2830 #include <asm/processor.h>
2831 #include <linux/module.h>
2832 #include <linux/elfcore.h>
2833diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2834index ff44823..cf0b48a 100644
2835--- a/arch/mips/kernel/binfmt_elfo32.c
2836+++ b/arch/mips/kernel/binfmt_elfo32.c
2837@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2838 #undef ELF_ET_DYN_BASE
2839 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2840
2841+#ifdef CONFIG_PAX_ASLR
2842+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2843+
2844+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2845+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2846+#endif
2847+
2848 #include <asm/processor.h>
2849
2850 /*
2851diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2852index 50c9bb8..efdd5f8 100644
2853--- a/arch/mips/kernel/kgdb.c
2854+++ b/arch/mips/kernel/kgdb.c
2855@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2856 return -1;
2857 }
2858
2859+/* cannot be const */
2860 struct kgdb_arch arch_kgdb_ops;
2861
2862 /*
2863diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2864index f3d73e1..bb3f57a 100644
2865--- a/arch/mips/kernel/process.c
2866+++ b/arch/mips/kernel/process.c
2867@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2868 out:
2869 return pc;
2870 }
2871-
2872-/*
2873- * Don't forget that the stack pointer must be aligned on a 8 bytes
2874- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2875- */
2876-unsigned long arch_align_stack(unsigned long sp)
2877-{
2878- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2879- sp -= get_random_int() & ~PAGE_MASK;
2880-
2881- return sp & ALMASK;
2882-}
2883diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2884index 060563a..7fbf310 100644
2885--- a/arch/mips/kernel/reset.c
2886+++ b/arch/mips/kernel/reset.c
2887@@ -19,8 +19,8 @@
2888 * So handle all using function pointers to machine specific
2889 * functions.
2890 */
2891-void (*_machine_restart)(char *command);
2892-void (*_machine_halt)(void);
2893+void (*__noreturn _machine_restart)(char *command);
2894+void (*__noreturn _machine_halt)(void);
2895 void (*pm_power_off)(void);
2896
2897 EXPORT_SYMBOL(pm_power_off);
2898@@ -29,16 +29,19 @@ void machine_restart(char *command)
2899 {
2900 if (_machine_restart)
2901 _machine_restart(command);
2902+ BUG();
2903 }
2904
2905 void machine_halt(void)
2906 {
2907 if (_machine_halt)
2908 _machine_halt();
2909+ BUG();
2910 }
2911
2912 void machine_power_off(void)
2913 {
2914 if (pm_power_off)
2915 pm_power_off();
2916+ BUG();
2917 }
2918diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2919index 3f7f466..3abe0b5 100644
2920--- a/arch/mips/kernel/syscall.c
2921+++ b/arch/mips/kernel/syscall.c
2922@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2923 do_color_align = 0;
2924 if (filp || (flags & MAP_SHARED))
2925 do_color_align = 1;
2926+
2927+#ifdef CONFIG_PAX_RANDMMAP
2928+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2929+#endif
2930+
2931 if (addr) {
2932 if (do_color_align)
2933 addr = COLOUR_ALIGN(addr, pgoff);
2934 else
2935 addr = PAGE_ALIGN(addr);
2936 vmm = find_vma(current->mm, addr);
2937- if (task_size - len >= addr &&
2938- (!vmm || addr + len <= vmm->vm_start))
2939+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2940 return addr;
2941 }
2942- addr = TASK_UNMAPPED_BASE;
2943+ addr = current->mm->mmap_base;
2944 if (do_color_align)
2945 addr = COLOUR_ALIGN(addr, pgoff);
2946 else
2947@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2948 /* At this point: (!vmm || addr < vmm->vm_end). */
2949 if (task_size - len < addr)
2950 return -ENOMEM;
2951- if (!vmm || addr + len <= vmm->vm_start)
2952+ if (check_heap_stack_gap(vmm, addr, len))
2953 return addr;
2954 addr = vmm->vm_end;
2955 if (do_color_align)
2956diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2957index e97a7a2..f18f5b0 100644
2958--- a/arch/mips/mm/fault.c
2959+++ b/arch/mips/mm/fault.c
2960@@ -26,6 +26,23 @@
2961 #include <asm/ptrace.h>
2962 #include <asm/highmem.h> /* For VMALLOC_END */
2963
2964+#ifdef CONFIG_PAX_PAGEEXEC
2965+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2966+{
2967+ unsigned long i;
2968+
2969+ printk(KERN_ERR "PAX: bytes at PC: ");
2970+ for (i = 0; i < 5; i++) {
2971+ unsigned int c;
2972+ if (get_user(c, (unsigned int *)pc+i))
2973+ printk(KERN_CONT "???????? ");
2974+ else
2975+ printk(KERN_CONT "%08x ", c);
2976+ }
2977+ printk("\n");
2978+}
2979+#endif
2980+
2981 /*
2982 * This routine handles page faults. It determines the address,
2983 * and the problem, and then passes it off to one of the appropriate
2984diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2985index bdc1f9a..e8de5c5 100644
2986--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2987+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2988@@ -11,12 +11,14 @@
2989 #ifndef _ASM_PROC_CACHE_H
2990 #define _ASM_PROC_CACHE_H
2991
2992+#include <linux/const.h>
2993+
2994 /* L1 cache */
2995
2996 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2997 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2998-#define L1_CACHE_BYTES 16 /* bytes per entry */
2999 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3000+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3001 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3002
3003 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3004diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3005index 8bc9e96..26554f8 100644
3006--- a/arch/parisc/include/asm/atomic.h
3007+++ b/arch/parisc/include/asm/atomic.h
3008@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3009
3010 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3011
3012+#define atomic64_read_unchecked(v) atomic64_read(v)
3013+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3014+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3015+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3016+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3017+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3018+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3019+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3020+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3021+
3022 #else /* CONFIG_64BIT */
3023
3024 #include <asm-generic/atomic64.h>
3025diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3026index 32c2cca..a7b3a64 100644
3027--- a/arch/parisc/include/asm/cache.h
3028+++ b/arch/parisc/include/asm/cache.h
3029@@ -5,6 +5,7 @@
3030 #ifndef __ARCH_PARISC_CACHE_H
3031 #define __ARCH_PARISC_CACHE_H
3032
3033+#include <linux/const.h>
3034
3035 /*
3036 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3037@@ -15,13 +16,13 @@
3038 * just ruin performance.
3039 */
3040 #ifdef CONFIG_PA20
3041-#define L1_CACHE_BYTES 64
3042 #define L1_CACHE_SHIFT 6
3043 #else
3044-#define L1_CACHE_BYTES 32
3045 #define L1_CACHE_SHIFT 5
3046 #endif
3047
3048+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3049+
3050 #ifndef __ASSEMBLY__
3051
3052 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
3053diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3054index 9c802eb..0592e41 100644
3055--- a/arch/parisc/include/asm/elf.h
3056+++ b/arch/parisc/include/asm/elf.h
3057@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
3058
3059 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3060
3061+#ifdef CONFIG_PAX_ASLR
3062+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3063+
3064+#define PAX_DELTA_MMAP_LEN 16
3065+#define PAX_DELTA_STACK_LEN 16
3066+#endif
3067+
3068 /* This yields a mask that user programs can use to figure out what
3069 instruction set this CPU supports. This could be done in user space,
3070 but it's not easy, and we've already done it here. */
3071diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3072index a27d2e2..18fd845 100644
3073--- a/arch/parisc/include/asm/pgtable.h
3074+++ b/arch/parisc/include/asm/pgtable.h
3075@@ -207,6 +207,17 @@
3076 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3077 #define PAGE_COPY PAGE_EXECREAD
3078 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3079+
3080+#ifdef CONFIG_PAX_PAGEEXEC
3081+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3082+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3083+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3084+#else
3085+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3086+# define PAGE_COPY_NOEXEC PAGE_COPY
3087+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3088+#endif
3089+
3090 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3091 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
3092 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
3093diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3094index 2120746..8d70a5e 100644
3095--- a/arch/parisc/kernel/module.c
3096+++ b/arch/parisc/kernel/module.c
3097@@ -95,16 +95,38 @@
3098
3099 /* three functions to determine where in the module core
3100 * or init pieces the location is */
3101+static inline int in_init_rx(struct module *me, void *loc)
3102+{
3103+ return (loc >= me->module_init_rx &&
3104+ loc < (me->module_init_rx + me->init_size_rx));
3105+}
3106+
3107+static inline int in_init_rw(struct module *me, void *loc)
3108+{
3109+ return (loc >= me->module_init_rw &&
3110+ loc < (me->module_init_rw + me->init_size_rw));
3111+}
3112+
3113 static inline int in_init(struct module *me, void *loc)
3114 {
3115- return (loc >= me->module_init &&
3116- loc <= (me->module_init + me->init_size));
3117+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3118+}
3119+
3120+static inline int in_core_rx(struct module *me, void *loc)
3121+{
3122+ return (loc >= me->module_core_rx &&
3123+ loc < (me->module_core_rx + me->core_size_rx));
3124+}
3125+
3126+static inline int in_core_rw(struct module *me, void *loc)
3127+{
3128+ return (loc >= me->module_core_rw &&
3129+ loc < (me->module_core_rw + me->core_size_rw));
3130 }
3131
3132 static inline int in_core(struct module *me, void *loc)
3133 {
3134- return (loc >= me->module_core &&
3135- loc <= (me->module_core + me->core_size));
3136+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3137 }
3138
3139 static inline int in_local(struct module *me, void *loc)
3140@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3141 }
3142
3143 /* align things a bit */
3144- me->core_size = ALIGN(me->core_size, 16);
3145- me->arch.got_offset = me->core_size;
3146- me->core_size += gots * sizeof(struct got_entry);
3147+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3148+ me->arch.got_offset = me->core_size_rw;
3149+ me->core_size_rw += gots * sizeof(struct got_entry);
3150
3151- me->core_size = ALIGN(me->core_size, 16);
3152- me->arch.fdesc_offset = me->core_size;
3153- me->core_size += fdescs * sizeof(Elf_Fdesc);
3154+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3155+ me->arch.fdesc_offset = me->core_size_rw;
3156+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3157
3158 me->arch.got_max = gots;
3159 me->arch.fdesc_max = fdescs;
3160@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3161
3162 BUG_ON(value == 0);
3163
3164- got = me->module_core + me->arch.got_offset;
3165+ got = me->module_core_rw + me->arch.got_offset;
3166 for (i = 0; got[i].addr; i++)
3167 if (got[i].addr == value)
3168 goto out;
3169@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3170 #ifdef CONFIG_64BIT
3171 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3172 {
3173- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3174+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3175
3176 if (!value) {
3177 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3178@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3179
3180 /* Create new one */
3181 fdesc->addr = value;
3182- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3183+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3184 return (Elf_Addr)fdesc;
3185 }
3186 #endif /* CONFIG_64BIT */
3187@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
3188
3189 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3190 end = table + sechdrs[me->arch.unwind_section].sh_size;
3191- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3192+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3193
3194 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3195 me->arch.unwind_section, table, end, gp);
3196diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3197index 9147391..f3d949a 100644
3198--- a/arch/parisc/kernel/sys_parisc.c
3199+++ b/arch/parisc/kernel/sys_parisc.c
3200@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3201 /* At this point: (!vma || addr < vma->vm_end). */
3202 if (TASK_SIZE - len < addr)
3203 return -ENOMEM;
3204- if (!vma || addr + len <= vma->vm_start)
3205+ if (check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 addr = vma->vm_end;
3208 }
3209@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3210 /* At this point: (!vma || addr < vma->vm_end). */
3211 if (TASK_SIZE - len < addr)
3212 return -ENOMEM;
3213- if (!vma || addr + len <= vma->vm_start)
3214+ if (check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3217 if (addr < vma->vm_end) /* handle wraparound */
3218@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3219 if (flags & MAP_FIXED)
3220 return addr;
3221 if (!addr)
3222- addr = TASK_UNMAPPED_BASE;
3223+ addr = current->mm->mmap_base;
3224
3225 if (filp) {
3226 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3227diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3228index 8b58bf0..7afff03 100644
3229--- a/arch/parisc/kernel/traps.c
3230+++ b/arch/parisc/kernel/traps.c
3231@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3232
3233 down_read(&current->mm->mmap_sem);
3234 vma = find_vma(current->mm,regs->iaoq[0]);
3235- if (vma && (regs->iaoq[0] >= vma->vm_start)
3236- && (vma->vm_flags & VM_EXEC)) {
3237-
3238+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3239 fault_address = regs->iaoq[0];
3240 fault_space = regs->iasq[0];
3241
3242diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3243index c6afbfc..c5839f6 100644
3244--- a/arch/parisc/mm/fault.c
3245+++ b/arch/parisc/mm/fault.c
3246@@ -15,6 +15,7 @@
3247 #include <linux/sched.h>
3248 #include <linux/interrupt.h>
3249 #include <linux/module.h>
3250+#include <linux/unistd.h>
3251
3252 #include <asm/uaccess.h>
3253 #include <asm/traps.h>
3254@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3255 static unsigned long
3256 parisc_acctyp(unsigned long code, unsigned int inst)
3257 {
3258- if (code == 6 || code == 16)
3259+ if (code == 6 || code == 7 || code == 16)
3260 return VM_EXEC;
3261
3262 switch (inst & 0xf0000000) {
3263@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3264 }
3265 #endif
3266
3267+#ifdef CONFIG_PAX_PAGEEXEC
3268+/*
3269+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3270+ *
3271+ * returns 1 when task should be killed
3272+ * 2 when rt_sigreturn trampoline was detected
3273+ * 3 when unpatched PLT trampoline was detected
3274+ */
3275+static int pax_handle_fetch_fault(struct pt_regs *regs)
3276+{
3277+
3278+#ifdef CONFIG_PAX_EMUPLT
3279+ int err;
3280+
3281+ do { /* PaX: unpatched PLT emulation */
3282+ unsigned int bl, depwi;
3283+
3284+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3285+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3286+
3287+ if (err)
3288+ break;
3289+
3290+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3291+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3292+
3293+ err = get_user(ldw, (unsigned int *)addr);
3294+ err |= get_user(bv, (unsigned int *)(addr+4));
3295+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3296+
3297+ if (err)
3298+ break;
3299+
3300+ if (ldw == 0x0E801096U &&
3301+ bv == 0xEAC0C000U &&
3302+ ldw2 == 0x0E881095U)
3303+ {
3304+ unsigned int resolver, map;
3305+
3306+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3307+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3308+ if (err)
3309+ break;
3310+
3311+ regs->gr[20] = instruction_pointer(regs)+8;
3312+ regs->gr[21] = map;
3313+ regs->gr[22] = resolver;
3314+ regs->iaoq[0] = resolver | 3UL;
3315+ regs->iaoq[1] = regs->iaoq[0] + 4;
3316+ return 3;
3317+ }
3318+ }
3319+ } while (0);
3320+#endif
3321+
3322+#ifdef CONFIG_PAX_EMUTRAMP
3323+
3324+#ifndef CONFIG_PAX_EMUSIGRT
3325+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3326+ return 1;
3327+#endif
3328+
3329+ do { /* PaX: rt_sigreturn emulation */
3330+ unsigned int ldi1, ldi2, bel, nop;
3331+
3332+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3333+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3334+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3335+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3336+
3337+ if (err)
3338+ break;
3339+
3340+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3341+ ldi2 == 0x3414015AU &&
3342+ bel == 0xE4008200U &&
3343+ nop == 0x08000240U)
3344+ {
3345+ regs->gr[25] = (ldi1 & 2) >> 1;
3346+ regs->gr[20] = __NR_rt_sigreturn;
3347+ regs->gr[31] = regs->iaoq[1] + 16;
3348+ regs->sr[0] = regs->iasq[1];
3349+ regs->iaoq[0] = 0x100UL;
3350+ regs->iaoq[1] = regs->iaoq[0] + 4;
3351+ regs->iasq[0] = regs->sr[2];
3352+ regs->iasq[1] = regs->sr[2];
3353+ return 2;
3354+ }
3355+ } while (0);
3356+#endif
3357+
3358+ return 1;
3359+}
3360+
3361+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3362+{
3363+ unsigned long i;
3364+
3365+ printk(KERN_ERR "PAX: bytes at PC: ");
3366+ for (i = 0; i < 5; i++) {
3367+ unsigned int c;
3368+ if (get_user(c, (unsigned int *)pc+i))
3369+ printk(KERN_CONT "???????? ");
3370+ else
3371+ printk(KERN_CONT "%08x ", c);
3372+ }
3373+ printk("\n");
3374+}
3375+#endif
3376+
3377 int fixup_exception(struct pt_regs *regs)
3378 {
3379 const struct exception_table_entry *fix;
3380@@ -192,8 +303,33 @@ good_area:
3381
3382 acc_type = parisc_acctyp(code,regs->iir);
3383
3384- if ((vma->vm_flags & acc_type) != acc_type)
3385+ if ((vma->vm_flags & acc_type) != acc_type) {
3386+
3387+#ifdef CONFIG_PAX_PAGEEXEC
3388+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3389+ (address & ~3UL) == instruction_pointer(regs))
3390+ {
3391+ up_read(&mm->mmap_sem);
3392+ switch (pax_handle_fetch_fault(regs)) {
3393+
3394+#ifdef CONFIG_PAX_EMUPLT
3395+ case 3:
3396+ return;
3397+#endif
3398+
3399+#ifdef CONFIG_PAX_EMUTRAMP
3400+ case 2:
3401+ return;
3402+#endif
3403+
3404+ }
3405+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3406+ do_group_exit(SIGKILL);
3407+ }
3408+#endif
3409+
3410 goto bad_area;
3411+ }
3412
3413 /*
3414 * If for any reason at all we couldn't handle the fault, make
3415diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3416index c107b74..409dc0f 100644
3417--- a/arch/powerpc/Makefile
3418+++ b/arch/powerpc/Makefile
3419@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
3420 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
3421 CPP = $(CC) -E $(KBUILD_CFLAGS)
3422
3423+cflags-y += -Wno-sign-compare -Wno-extra
3424+
3425 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
3426
3427 ifeq ($(CONFIG_PPC64),y)
3428diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3429index 81de6eb..d5d0e24 100644
3430--- a/arch/powerpc/include/asm/cache.h
3431+++ b/arch/powerpc/include/asm/cache.h
3432@@ -3,6 +3,7 @@
3433
3434 #ifdef __KERNEL__
3435
3436+#include <linux/const.h>
3437
3438 /* bytes per L1 cache line */
3439 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3440@@ -18,7 +19,7 @@
3441 #define L1_CACHE_SHIFT 7
3442 #endif
3443
3444-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3446
3447 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3448
3449diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
3450index 6d94d27..50d4cad 100644
3451--- a/arch/powerpc/include/asm/device.h
3452+++ b/arch/powerpc/include/asm/device.h
3453@@ -14,7 +14,7 @@ struct dev_archdata {
3454 struct device_node *of_node;
3455
3456 /* DMA operations on that device */
3457- struct dma_map_ops *dma_ops;
3458+ const struct dma_map_ops *dma_ops;
3459
3460 /*
3461 * When an iommu is in use, dma_data is used as a ptr to the base of the
3462diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
3463index e281dae..2b8a784 100644
3464--- a/arch/powerpc/include/asm/dma-mapping.h
3465+++ b/arch/powerpc/include/asm/dma-mapping.h
3466@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
3467 #ifdef CONFIG_PPC64
3468 extern struct dma_map_ops dma_iommu_ops;
3469 #endif
3470-extern struct dma_map_ops dma_direct_ops;
3471+extern const struct dma_map_ops dma_direct_ops;
3472
3473-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3474+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3475 {
3476 /* We don't handle the NULL dev case for ISA for now. We could
3477 * do it via an out of line call but it is not needed for now. The
3478@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3479 return dev->archdata.dma_ops;
3480 }
3481
3482-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
3483+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
3484 {
3485 dev->archdata.dma_ops = ops;
3486 }
3487@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
3488
3489 static inline int dma_supported(struct device *dev, u64 mask)
3490 {
3491- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3492+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3493
3494 if (unlikely(dma_ops == NULL))
3495 return 0;
3496@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
3497
3498 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3499 {
3500- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3501+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3502
3503 if (unlikely(dma_ops == NULL))
3504 return -EIO;
3505@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3506 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3507 dma_addr_t *dma_handle, gfp_t flag)
3508 {
3509- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3510+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3511 void *cpu_addr;
3512
3513 BUG_ON(!dma_ops);
3514@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3515 static inline void dma_free_coherent(struct device *dev, size_t size,
3516 void *cpu_addr, dma_addr_t dma_handle)
3517 {
3518- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3519+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3520
3521 BUG_ON(!dma_ops);
3522
3523@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
3524
3525 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3526 {
3527- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3528+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3529
3530 if (dma_ops->mapping_error)
3531 return dma_ops->mapping_error(dev, dma_addr);
3532diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3533index 5698502..5db093c 100644
3534--- a/arch/powerpc/include/asm/elf.h
3535+++ b/arch/powerpc/include/asm/elf.h
3536@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3537 the loader. We need to make sure that it is out of the way of the program
3538 that it will "exec", and that there is sufficient room for the brk. */
3539
3540-extern unsigned long randomize_et_dyn(unsigned long base);
3541-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3542+#define ELF_ET_DYN_BASE (0x20000000)
3543+
3544+#ifdef CONFIG_PAX_ASLR
3545+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3546+
3547+#ifdef __powerpc64__
3548+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3549+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3550+#else
3551+#define PAX_DELTA_MMAP_LEN 15
3552+#define PAX_DELTA_STACK_LEN 15
3553+#endif
3554+#endif
3555
3556 /*
3557 * Our registers are always unsigned longs, whether we're a 32 bit
3558@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3559 (0x7ff >> (PAGE_SHIFT - 12)) : \
3560 (0x3ffff >> (PAGE_SHIFT - 12)))
3561
3562-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3563-#define arch_randomize_brk arch_randomize_brk
3564-
3565 #endif /* __KERNEL__ */
3566
3567 /*
3568diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
3569index edfc980..1766f59 100644
3570--- a/arch/powerpc/include/asm/iommu.h
3571+++ b/arch/powerpc/include/asm/iommu.h
3572@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
3573 extern void iommu_init_early_dart(void);
3574 extern void iommu_init_early_pasemi(void);
3575
3576+/* dma-iommu.c */
3577+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
3578+
3579 #ifdef CONFIG_PCI
3580 extern void pci_iommu_init(void);
3581 extern void pci_direct_iommu_init(void);
3582diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3583index 9163695..5a00112 100644
3584--- a/arch/powerpc/include/asm/kmap_types.h
3585+++ b/arch/powerpc/include/asm/kmap_types.h
3586@@ -26,6 +26,7 @@ enum km_type {
3587 KM_SOFTIRQ1,
3588 KM_PPC_SYNC_PAGE,
3589 KM_PPC_SYNC_ICACHE,
3590+ KM_CLEARPAGE,
3591 KM_TYPE_NR
3592 };
3593
3594diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3595index ff24254..fe45b21 100644
3596--- a/arch/powerpc/include/asm/page.h
3597+++ b/arch/powerpc/include/asm/page.h
3598@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
3599 * and needs to be executable. This means the whole heap ends
3600 * up being executable.
3601 */
3602-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3603- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3604+#define VM_DATA_DEFAULT_FLAGS32 \
3605+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3606+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3607
3608 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3609 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3610@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
3611 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3612 #endif
3613
3614+#define ktla_ktva(addr) (addr)
3615+#define ktva_ktla(addr) (addr)
3616+
3617 #ifndef __ASSEMBLY__
3618
3619 #undef STRICT_MM_TYPECHECKS
3620diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3621index 3f17b83..1f9e766 100644
3622--- a/arch/powerpc/include/asm/page_64.h
3623+++ b/arch/powerpc/include/asm/page_64.h
3624@@ -180,15 +180,18 @@ do { \
3625 * stack by default, so in the absense of a PT_GNU_STACK program header
3626 * we turn execute permission off.
3627 */
3628-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3629- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3630+#define VM_STACK_DEFAULT_FLAGS32 \
3631+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3632+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3633
3634 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3635 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3636
3637+#ifndef CONFIG_PAX_PAGEEXEC
3638 #define VM_STACK_DEFAULT_FLAGS \
3639 (test_thread_flag(TIF_32BIT) ? \
3640 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3641+#endif
3642
3643 #include <asm-generic/getorder.h>
3644
3645diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
3646index b5ea626..40308222 100644
3647--- a/arch/powerpc/include/asm/pci.h
3648+++ b/arch/powerpc/include/asm/pci.h
3649@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3650 }
3651
3652 #ifdef CONFIG_PCI
3653-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
3654-extern struct dma_map_ops *get_pci_dma_ops(void);
3655+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
3656+extern const struct dma_map_ops *get_pci_dma_ops(void);
3657 #else /* CONFIG_PCI */
3658 #define set_pci_dma_ops(d)
3659 #define get_pci_dma_ops() NULL
3660diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3661index 2a5da06..d65bea2 100644
3662--- a/arch/powerpc/include/asm/pgtable.h
3663+++ b/arch/powerpc/include/asm/pgtable.h
3664@@ -2,6 +2,7 @@
3665 #define _ASM_POWERPC_PGTABLE_H
3666 #ifdef __KERNEL__
3667
3668+#include <linux/const.h>
3669 #ifndef __ASSEMBLY__
3670 #include <asm/processor.h> /* For TASK_SIZE */
3671 #include <asm/mmu.h>
3672diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3673index 4aad413..85d86bf 100644
3674--- a/arch/powerpc/include/asm/pte-hash32.h
3675+++ b/arch/powerpc/include/asm/pte-hash32.h
3676@@ -21,6 +21,7 @@
3677 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3678 #define _PAGE_USER 0x004 /* usermode access allowed */
3679 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3680+#define _PAGE_EXEC _PAGE_GUARDED
3681 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3682 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3683 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3684diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
3685index 8c34149..78f425a 100644
3686--- a/arch/powerpc/include/asm/ptrace.h
3687+++ b/arch/powerpc/include/asm/ptrace.h
3688@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
3689 } while(0)
3690
3691 struct task_struct;
3692-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
3693+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
3694 extern int ptrace_put_reg(struct task_struct *task, int regno,
3695 unsigned long data);
3696
3697diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3698index 32a7c30..be3a8bb 100644
3699--- a/arch/powerpc/include/asm/reg.h
3700+++ b/arch/powerpc/include/asm/reg.h
3701@@ -191,6 +191,7 @@
3702 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3703 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3704 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3705+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3706 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3707 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3708 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3709diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
3710index 8979d4c..d2fd0d3 100644
3711--- a/arch/powerpc/include/asm/swiotlb.h
3712+++ b/arch/powerpc/include/asm/swiotlb.h
3713@@ -13,7 +13,7 @@
3714
3715 #include <linux/swiotlb.h>
3716
3717-extern struct dma_map_ops swiotlb_dma_ops;
3718+extern const struct dma_map_ops swiotlb_dma_ops;
3719
3720 static inline void dma_mark_clean(void *addr, size_t size) {}
3721
3722diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3723index 094a12a..877a60a 100644
3724--- a/arch/powerpc/include/asm/system.h
3725+++ b/arch/powerpc/include/asm/system.h
3726@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3727 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3728 #endif
3729
3730-extern unsigned long arch_align_stack(unsigned long sp);
3731+#define arch_align_stack(x) ((x) & ~0xfUL)
3732
3733 /* Used in very early kernel initialization. */
3734 extern unsigned long reloc_offset(void);
3735diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3736index bd0fb84..a42a14b 100644
3737--- a/arch/powerpc/include/asm/uaccess.h
3738+++ b/arch/powerpc/include/asm/uaccess.h
3739@@ -13,6 +13,8 @@
3740 #define VERIFY_READ 0
3741 #define VERIFY_WRITE 1
3742
3743+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3744+
3745 /*
3746 * The fs value determines whether argument validity checking should be
3747 * performed or not. If get_fs() == USER_DS, checking is performed, with
3748@@ -327,52 +329,6 @@ do { \
3749 extern unsigned long __copy_tofrom_user(void __user *to,
3750 const void __user *from, unsigned long size);
3751
3752-#ifndef __powerpc64__
3753-
3754-static inline unsigned long copy_from_user(void *to,
3755- const void __user *from, unsigned long n)
3756-{
3757- unsigned long over;
3758-
3759- if (access_ok(VERIFY_READ, from, n))
3760- return __copy_tofrom_user((__force void __user *)to, from, n);
3761- if ((unsigned long)from < TASK_SIZE) {
3762- over = (unsigned long)from + n - TASK_SIZE;
3763- return __copy_tofrom_user((__force void __user *)to, from,
3764- n - over) + over;
3765- }
3766- return n;
3767-}
3768-
3769-static inline unsigned long copy_to_user(void __user *to,
3770- const void *from, unsigned long n)
3771-{
3772- unsigned long over;
3773-
3774- if (access_ok(VERIFY_WRITE, to, n))
3775- return __copy_tofrom_user(to, (__force void __user *)from, n);
3776- if ((unsigned long)to < TASK_SIZE) {
3777- over = (unsigned long)to + n - TASK_SIZE;
3778- return __copy_tofrom_user(to, (__force void __user *)from,
3779- n - over) + over;
3780- }
3781- return n;
3782-}
3783-
3784-#else /* __powerpc64__ */
3785-
3786-#define __copy_in_user(to, from, size) \
3787- __copy_tofrom_user((to), (from), (size))
3788-
3789-extern unsigned long copy_from_user(void *to, const void __user *from,
3790- unsigned long n);
3791-extern unsigned long copy_to_user(void __user *to, const void *from,
3792- unsigned long n);
3793-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3794- unsigned long n);
3795-
3796-#endif /* __powerpc64__ */
3797-
3798 static inline unsigned long __copy_from_user_inatomic(void *to,
3799 const void __user *from, unsigned long n)
3800 {
3801@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3802 if (ret == 0)
3803 return 0;
3804 }
3805+
3806+ if (!__builtin_constant_p(n))
3807+ check_object_size(to, n, false);
3808+
3809 return __copy_tofrom_user((__force void __user *)to, from, n);
3810 }
3811
3812@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3813 if (ret == 0)
3814 return 0;
3815 }
3816+
3817+ if (!__builtin_constant_p(n))
3818+ check_object_size(from, n, true);
3819+
3820 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3821 }
3822
3823@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3824 return __copy_to_user_inatomic(to, from, size);
3825 }
3826
3827+#ifndef __powerpc64__
3828+
3829+static inline unsigned long __must_check copy_from_user(void *to,
3830+ const void __user *from, unsigned long n)
3831+{
3832+ unsigned long over;
3833+
3834+ if ((long)n < 0)
3835+ return n;
3836+
3837+ if (access_ok(VERIFY_READ, from, n)) {
3838+ if (!__builtin_constant_p(n))
3839+ check_object_size(to, n, false);
3840+ return __copy_tofrom_user((__force void __user *)to, from, n);
3841+ }
3842+ if ((unsigned long)from < TASK_SIZE) {
3843+ over = (unsigned long)from + n - TASK_SIZE;
3844+ if (!__builtin_constant_p(n - over))
3845+ check_object_size(to, n - over, false);
3846+ return __copy_tofrom_user((__force void __user *)to, from,
3847+ n - over) + over;
3848+ }
3849+ return n;
3850+}
3851+
3852+static inline unsigned long __must_check copy_to_user(void __user *to,
3853+ const void *from, unsigned long n)
3854+{
3855+ unsigned long over;
3856+
3857+ if ((long)n < 0)
3858+ return n;
3859+
3860+ if (access_ok(VERIFY_WRITE, to, n)) {
3861+ if (!__builtin_constant_p(n))
3862+ check_object_size(from, n, true);
3863+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3864+ }
3865+ if ((unsigned long)to < TASK_SIZE) {
3866+ over = (unsigned long)to + n - TASK_SIZE;
3867+ if (!__builtin_constant_p(n))
3868+ check_object_size(from, n - over, true);
3869+ return __copy_tofrom_user(to, (__force void __user *)from,
3870+ n - over) + over;
3871+ }
3872+ return n;
3873+}
3874+
3875+#else /* __powerpc64__ */
3876+
3877+#define __copy_in_user(to, from, size) \
3878+ __copy_tofrom_user((to), (from), (size))
3879+
3880+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3881+{
3882+ if ((long)n < 0 || n > INT_MAX)
3883+ return n;
3884+
3885+ if (!__builtin_constant_p(n))
3886+ check_object_size(to, n, false);
3887+
3888+ if (likely(access_ok(VERIFY_READ, from, n)))
3889+ n = __copy_from_user(to, from, n);
3890+ else
3891+ memset(to, 0, n);
3892+ return n;
3893+}
3894+
3895+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3896+{
3897+ if ((long)n < 0 || n > INT_MAX)
3898+ return n;
3899+
3900+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3901+ if (!__builtin_constant_p(n))
3902+ check_object_size(from, n, true);
3903+ n = __copy_to_user(to, from, n);
3904+ }
3905+ return n;
3906+}
3907+
3908+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3909+ unsigned long n);
3910+
3911+#endif /* __powerpc64__ */
3912+
3913 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3914
3915 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3916diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3917index bb37b1d..01fe9ce 100644
3918--- a/arch/powerpc/kernel/cacheinfo.c
3919+++ b/arch/powerpc/kernel/cacheinfo.c
3920@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3921 &cache_assoc_attr,
3922 };
3923
3924-static struct sysfs_ops cache_index_ops = {
3925+static const struct sysfs_ops cache_index_ops = {
3926 .show = cache_index_show,
3927 };
3928
3929diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3930index 37771a5..648530c 100644
3931--- a/arch/powerpc/kernel/dma-iommu.c
3932+++ b/arch/powerpc/kernel/dma-iommu.c
3933@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3934 }
3935
3936 /* We support DMA to/from any memory page via the iommu */
3937-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3938+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3939 {
3940 struct iommu_table *tbl = get_iommu_table_base(dev);
3941
3942diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3943index e96cbbd..bdd6d41 100644
3944--- a/arch/powerpc/kernel/dma-swiotlb.c
3945+++ b/arch/powerpc/kernel/dma-swiotlb.c
3946@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3947 * map_page, and unmap_page on highmem, use normal dma_ops
3948 * for everything else.
3949 */
3950-struct dma_map_ops swiotlb_dma_ops = {
3951+const struct dma_map_ops swiotlb_dma_ops = {
3952 .alloc_coherent = dma_direct_alloc_coherent,
3953 .free_coherent = dma_direct_free_coherent,
3954 .map_sg = swiotlb_map_sg_attrs,
3955diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3956index 6215062..ebea59c 100644
3957--- a/arch/powerpc/kernel/dma.c
3958+++ b/arch/powerpc/kernel/dma.c
3959@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3960 }
3961 #endif
3962
3963-struct dma_map_ops dma_direct_ops = {
3964+const struct dma_map_ops dma_direct_ops = {
3965 .alloc_coherent = dma_direct_alloc_coherent,
3966 .free_coherent = dma_direct_free_coherent,
3967 .map_sg = dma_direct_map_sg,
3968diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3969index 24dcc0e..a300455 100644
3970--- a/arch/powerpc/kernel/exceptions-64e.S
3971+++ b/arch/powerpc/kernel/exceptions-64e.S
3972@@ -455,6 +455,7 @@ storage_fault_common:
3973 std r14,_DAR(r1)
3974 std r15,_DSISR(r1)
3975 addi r3,r1,STACK_FRAME_OVERHEAD
3976+ bl .save_nvgprs
3977 mr r4,r14
3978 mr r5,r15
3979 ld r14,PACA_EXGEN+EX_R14(r13)
3980@@ -464,8 +465,7 @@ storage_fault_common:
3981 cmpdi r3,0
3982 bne- 1f
3983 b .ret_from_except_lite
3984-1: bl .save_nvgprs
3985- mr r5,r3
3986+1: mr r5,r3
3987 addi r3,r1,STACK_FRAME_OVERHEAD
3988 ld r4,_DAR(r1)
3989 bl .bad_page_fault
3990diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3991index 1808876..9fd206a 100644
3992--- a/arch/powerpc/kernel/exceptions-64s.S
3993+++ b/arch/powerpc/kernel/exceptions-64s.S
3994@@ -818,10 +818,10 @@ handle_page_fault:
3995 11: ld r4,_DAR(r1)
3996 ld r5,_DSISR(r1)
3997 addi r3,r1,STACK_FRAME_OVERHEAD
3998+ bl .save_nvgprs
3999 bl .do_page_fault
4000 cmpdi r3,0
4001 beq+ 13f
4002- bl .save_nvgprs
4003 mr r5,r3
4004 addi r3,r1,STACK_FRAME_OVERHEAD
4005 lwz r4,_DAR(r1)
4006diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
4007index a4c8b38..1b09ad9 100644
4008--- a/arch/powerpc/kernel/ibmebus.c
4009+++ b/arch/powerpc/kernel/ibmebus.c
4010@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
4011 return 1;
4012 }
4013
4014-static struct dma_map_ops ibmebus_dma_ops = {
4015+static const struct dma_map_ops ibmebus_dma_ops = {
4016 .alloc_coherent = ibmebus_alloc_coherent,
4017 .free_coherent = ibmebus_free_coherent,
4018 .map_sg = ibmebus_map_sg,
4019diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4020index 8564a41..67f3471 100644
4021--- a/arch/powerpc/kernel/irq.c
4022+++ b/arch/powerpc/kernel/irq.c
4023@@ -490,9 +490,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4024 host->ops = ops;
4025 host->of_node = of_node_get(of_node);
4026
4027- if (host->ops->match == NULL)
4028- host->ops->match = default_irq_host_match;
4029-
4030 spin_lock_irqsave(&irq_big_lock, flags);
4031
4032 /* If it's a legacy controller, check for duplicates and
4033@@ -567,7 +564,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4034 */
4035 spin_lock_irqsave(&irq_big_lock, flags);
4036 list_for_each_entry(h, &irq_hosts, link)
4037- if (h->ops->match(h, node)) {
4038+ if (h->ops->match) {
4039+ if (h->ops->match(h, node)) {
4040+ found = h;
4041+ break;
4042+ }
4043+ } else if (default_irq_host_match(h, node)) {
4044 found = h;
4045 break;
4046 }
4047diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
4048index 641c74b..8339ad7 100644
4049--- a/arch/powerpc/kernel/kgdb.c
4050+++ b/arch/powerpc/kernel/kgdb.c
4051@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
4052 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
4053 return 0;
4054
4055- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4056+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4057 regs->nip += 4;
4058
4059 return 1;
4060@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
4061 /*
4062 * Global data
4063 */
4064-struct kgdb_arch arch_kgdb_ops = {
4065+const struct kgdb_arch arch_kgdb_ops = {
4066 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
4067 };
4068
4069diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
4070index 477c663..4f50234 100644
4071--- a/arch/powerpc/kernel/module.c
4072+++ b/arch/powerpc/kernel/module.c
4073@@ -31,11 +31,24 @@
4074
4075 LIST_HEAD(module_bug_list);
4076
4077+#ifdef CONFIG_PAX_KERNEXEC
4078 void *module_alloc(unsigned long size)
4079 {
4080 if (size == 0)
4081 return NULL;
4082
4083+ return vmalloc(size);
4084+}
4085+
4086+void *module_alloc_exec(unsigned long size)
4087+#else
4088+void *module_alloc(unsigned long size)
4089+#endif
4090+
4091+{
4092+ if (size == 0)
4093+ return NULL;
4094+
4095 return vmalloc_exec(size);
4096 }
4097
4098@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
4099 vfree(module_region);
4100 }
4101
4102+#ifdef CONFIG_PAX_KERNEXEC
4103+void module_free_exec(struct module *mod, void *module_region)
4104+{
4105+ module_free(mod, module_region);
4106+}
4107+#endif
4108+
4109 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
4110 const Elf_Shdr *sechdrs,
4111 const char *name)
4112diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4113index f832773..0507238 100644
4114--- a/arch/powerpc/kernel/module_32.c
4115+++ b/arch/powerpc/kernel/module_32.c
4116@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4117 me->arch.core_plt_section = i;
4118 }
4119 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4120- printk("Module doesn't contain .plt or .init.plt sections.\n");
4121+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4122 return -ENOEXEC;
4123 }
4124
4125@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
4126
4127 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4128 /* Init, or core PLT? */
4129- if (location >= mod->module_core
4130- && location < mod->module_core + mod->core_size)
4131+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4132+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4133 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4134- else
4135+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4136+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4137 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4138+ else {
4139+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4140+ return ~0UL;
4141+ }
4142
4143 /* Find this entry, or if that fails, the next avail. entry */
4144 while (entry->jump[0]) {
4145diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
4146index cadbed6..b9bbb00 100644
4147--- a/arch/powerpc/kernel/pci-common.c
4148+++ b/arch/powerpc/kernel/pci-common.c
4149@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
4150 unsigned int ppc_pci_flags = 0;
4151
4152
4153-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4154+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4155
4156-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
4157+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
4158 {
4159 pci_dma_ops = dma_ops;
4160 }
4161
4162-struct dma_map_ops *get_pci_dma_ops(void)
4163+const struct dma_map_ops *get_pci_dma_ops(void)
4164 {
4165 return pci_dma_ops;
4166 }
4167diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4168index 7b816da..8d5c277 100644
4169--- a/arch/powerpc/kernel/process.c
4170+++ b/arch/powerpc/kernel/process.c
4171@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
4172 * Lookup NIP late so we have the best change of getting the
4173 * above info out without failing
4174 */
4175- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4176- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4177+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4178+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4179 #endif
4180 show_stack(current, (unsigned long *) regs->gpr[1]);
4181 if (!user_mode(regs))
4182@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4183 newsp = stack[0];
4184 ip = stack[STACK_FRAME_LR_SAVE];
4185 if (!firstframe || ip != lr) {
4186- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4187+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4188 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4189 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4190- printk(" (%pS)",
4191+ printk(" (%pA)",
4192 (void *)current->ret_stack[curr_frame].ret);
4193 curr_frame--;
4194 }
4195@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4196 struct pt_regs *regs = (struct pt_regs *)
4197 (sp + STACK_FRAME_OVERHEAD);
4198 lr = regs->link;
4199- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4200+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4201 regs->trap, (void *)regs->nip, (void *)lr);
4202 firstframe = 1;
4203 }
4204@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
4205 }
4206
4207 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4208-
4209-unsigned long arch_align_stack(unsigned long sp)
4210-{
4211- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4212- sp -= get_random_int() & ~PAGE_MASK;
4213- return sp & ~0xf;
4214-}
4215-
4216-static inline unsigned long brk_rnd(void)
4217-{
4218- unsigned long rnd = 0;
4219-
4220- /* 8MB for 32bit, 1GB for 64bit */
4221- if (is_32bit_task())
4222- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4223- else
4224- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4225-
4226- return rnd << PAGE_SHIFT;
4227-}
4228-
4229-unsigned long arch_randomize_brk(struct mm_struct *mm)
4230-{
4231- unsigned long base = mm->brk;
4232- unsigned long ret;
4233-
4234-#ifdef CONFIG_PPC_STD_MMU_64
4235- /*
4236- * If we are using 1TB segments and we are allowed to randomise
4237- * the heap, we can put it above 1TB so it is backed by a 1TB
4238- * segment. Otherwise the heap will be in the bottom 1TB
4239- * which always uses 256MB segments and this may result in a
4240- * performance penalty.
4241- */
4242- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4243- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4244-#endif
4245-
4246- ret = PAGE_ALIGN(base + brk_rnd());
4247-
4248- if (ret < mm->brk)
4249- return mm->brk;
4250-
4251- return ret;
4252-}
4253-
4254-unsigned long randomize_et_dyn(unsigned long base)
4255-{
4256- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4257-
4258- if (ret < base)
4259- return base;
4260-
4261- return ret;
4262-}
4263diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4264index ef14988..856c4bc 100644
4265--- a/arch/powerpc/kernel/ptrace.c
4266+++ b/arch/powerpc/kernel/ptrace.c
4267@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
4268 /*
4269 * Get contents of register REGNO in task TASK.
4270 */
4271-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
4272+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
4273 {
4274 if (task->thread.regs == NULL)
4275 return -EIO;
4276@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
4277
4278 CHECK_FULL_REGS(child->thread.regs);
4279 if (index < PT_FPR0) {
4280- tmp = ptrace_get_reg(child, (int) index);
4281+ tmp = ptrace_get_reg(child, index);
4282 } else {
4283 flush_fp_to_thread(child);
4284 tmp = ((unsigned long *)child->thread.fpr)
4285diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4286index d670429..2bc59b2 100644
4287--- a/arch/powerpc/kernel/signal_32.c
4288+++ b/arch/powerpc/kernel/signal_32.c
4289@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4290 /* Save user registers on the stack */
4291 frame = &rt_sf->uc.uc_mcontext;
4292 addr = frame;
4293- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4294+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4295 if (save_user_regs(regs, frame, 0, 1))
4296 goto badframe;
4297 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4298diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4299index 2fe6fc6..ada0d96 100644
4300--- a/arch/powerpc/kernel/signal_64.c
4301+++ b/arch/powerpc/kernel/signal_64.c
4302@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4303 current->thread.fpscr.val = 0;
4304
4305 /* Set up to return from userspace. */
4306- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4307+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4308 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4309 } else {
4310 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4311diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
4312index b97c2d6..dd01a6a 100644
4313--- a/arch/powerpc/kernel/sys_ppc32.c
4314+++ b/arch/powerpc/kernel/sys_ppc32.c
4315@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
4316 if (oldlenp) {
4317 if (!error) {
4318 if (get_user(oldlen, oldlenp) ||
4319- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
4320+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
4321+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
4322 error = -EFAULT;
4323 }
4324- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
4325 }
4326 return error;
4327 }
4328diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4329index 6f0ae1a..e4b6a56 100644
4330--- a/arch/powerpc/kernel/traps.c
4331+++ b/arch/powerpc/kernel/traps.c
4332@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
4333 static inline void pmac_backlight_unblank(void) { }
4334 #endif
4335
4336+extern void gr_handle_kernel_exploit(void);
4337+
4338 int die(const char *str, struct pt_regs *regs, long err)
4339 {
4340 static struct {
4341@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
4342 if (panic_on_oops)
4343 panic("Fatal exception");
4344
4345+ gr_handle_kernel_exploit();
4346+
4347 oops_exit();
4348 do_exit(err);
4349
4350diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4351index 137dc22..fe57a79 100644
4352--- a/arch/powerpc/kernel/vdso.c
4353+++ b/arch/powerpc/kernel/vdso.c
4354@@ -36,6 +36,7 @@
4355 #include <asm/firmware.h>
4356 #include <asm/vdso.h>
4357 #include <asm/vdso_datapage.h>
4358+#include <asm/mman.h>
4359
4360 #include "setup.h"
4361
4362@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4363 vdso_base = VDSO32_MBASE;
4364 #endif
4365
4366- current->mm->context.vdso_base = 0;
4367+ current->mm->context.vdso_base = ~0UL;
4368
4369 /* vDSO has a problem and was disabled, just don't "enable" it for the
4370 * process
4371@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4372 vdso_base = get_unmapped_area(NULL, vdso_base,
4373 (vdso_pages << PAGE_SHIFT) +
4374 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4375- 0, 0);
4376+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4377 if (IS_ERR_VALUE(vdso_base)) {
4378 rc = vdso_base;
4379 goto fail_mmapsem;
4380diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
4381index 77f6421..829564a 100644
4382--- a/arch/powerpc/kernel/vio.c
4383+++ b/arch/powerpc/kernel/vio.c
4384@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
4385 vio_cmo_dealloc(viodev, alloc_size);
4386 }
4387
4388-struct dma_map_ops vio_dma_mapping_ops = {
4389+static const struct dma_map_ops vio_dma_mapping_ops = {
4390 .alloc_coherent = vio_dma_iommu_alloc_coherent,
4391 .free_coherent = vio_dma_iommu_free_coherent,
4392 .map_sg = vio_dma_iommu_map_sg,
4393 .unmap_sg = vio_dma_iommu_unmap_sg,
4394+ .dma_supported = dma_iommu_dma_supported,
4395 .map_page = vio_dma_iommu_map_page,
4396 .unmap_page = vio_dma_iommu_unmap_page,
4397
4398@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
4399
4400 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
4401 {
4402- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
4403 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
4404 }
4405
4406diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4407index 5eea6f3..5d10396 100644
4408--- a/arch/powerpc/lib/usercopy_64.c
4409+++ b/arch/powerpc/lib/usercopy_64.c
4410@@ -9,22 +9,6 @@
4411 #include <linux/module.h>
4412 #include <asm/uaccess.h>
4413
4414-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4415-{
4416- if (likely(access_ok(VERIFY_READ, from, n)))
4417- n = __copy_from_user(to, from, n);
4418- else
4419- memset(to, 0, n);
4420- return n;
4421-}
4422-
4423-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4424-{
4425- if (likely(access_ok(VERIFY_WRITE, to, n)))
4426- n = __copy_to_user(to, from, n);
4427- return n;
4428-}
4429-
4430 unsigned long copy_in_user(void __user *to, const void __user *from,
4431 unsigned long n)
4432 {
4433@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4434 return n;
4435 }
4436
4437-EXPORT_SYMBOL(copy_from_user);
4438-EXPORT_SYMBOL(copy_to_user);
4439 EXPORT_SYMBOL(copy_in_user);
4440
4441diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4442index e7dae82..877ce0d 100644
4443--- a/arch/powerpc/mm/fault.c
4444+++ b/arch/powerpc/mm/fault.c
4445@@ -30,6 +30,10 @@
4446 #include <linux/kprobes.h>
4447 #include <linux/kdebug.h>
4448 #include <linux/perf_event.h>
4449+#include <linux/slab.h>
4450+#include <linux/pagemap.h>
4451+#include <linux/compiler.h>
4452+#include <linux/unistd.h>
4453
4454 #include <asm/firmware.h>
4455 #include <asm/page.h>
4456@@ -40,6 +44,7 @@
4457 #include <asm/uaccess.h>
4458 #include <asm/tlbflush.h>
4459 #include <asm/siginfo.h>
4460+#include <asm/ptrace.h>
4461
4462
4463 #ifdef CONFIG_KPROBES
4464@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4465 }
4466 #endif
4467
4468+#ifdef CONFIG_PAX_PAGEEXEC
4469+/*
4470+ * PaX: decide what to do with offenders (regs->nip = fault address)
4471+ *
4472+ * returns 1 when task should be killed
4473+ */
4474+static int pax_handle_fetch_fault(struct pt_regs *regs)
4475+{
4476+ return 1;
4477+}
4478+
4479+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4480+{
4481+ unsigned long i;
4482+
4483+ printk(KERN_ERR "PAX: bytes at PC: ");
4484+ for (i = 0; i < 5; i++) {
4485+ unsigned int c;
4486+ if (get_user(c, (unsigned int __user *)pc+i))
4487+ printk(KERN_CONT "???????? ");
4488+ else
4489+ printk(KERN_CONT "%08x ", c);
4490+ }
4491+ printk("\n");
4492+}
4493+#endif
4494+
4495 /*
4496 * Check whether the instruction at regs->nip is a store using
4497 * an update addressing form which will update r1.
4498@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4499 * indicate errors in DSISR but can validly be set in SRR1.
4500 */
4501 if (trap == 0x400)
4502- error_code &= 0x48200000;
4503+ error_code &= 0x58200000;
4504 else
4505 is_write = error_code & DSISR_ISSTORE;
4506 #else
4507@@ -250,7 +282,7 @@ good_area:
4508 * "undefined". Of those that can be set, this is the only
4509 * one which seems bad.
4510 */
4511- if (error_code & 0x10000000)
4512+ if (error_code & DSISR_GUARDED)
4513 /* Guarded storage error. */
4514 goto bad_area;
4515 #endif /* CONFIG_8xx */
4516@@ -265,7 +297,7 @@ good_area:
4517 * processors use the same I/D cache coherency mechanism
4518 * as embedded.
4519 */
4520- if (error_code & DSISR_PROTFAULT)
4521+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4522 goto bad_area;
4523 #endif /* CONFIG_PPC_STD_MMU */
4524
4525@@ -335,6 +367,23 @@ bad_area:
4526 bad_area_nosemaphore:
4527 /* User mode accesses cause a SIGSEGV */
4528 if (user_mode(regs)) {
4529+
4530+#ifdef CONFIG_PAX_PAGEEXEC
4531+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4532+#ifdef CONFIG_PPC_STD_MMU
4533+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4534+#else
4535+ if (is_exec && regs->nip == address) {
4536+#endif
4537+ switch (pax_handle_fetch_fault(regs)) {
4538+ }
4539+
4540+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4541+ do_group_exit(SIGKILL);
4542+ }
4543+ }
4544+#endif
4545+
4546 _exception(SIGSEGV, regs, code, address);
4547 return 0;
4548 }
4549diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
4550index 5973631..ad617af 100644
4551--- a/arch/powerpc/mm/mem.c
4552+++ b/arch/powerpc/mm/mem.c
4553@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
4554 {
4555 unsigned long lmb_next_region_start_pfn,
4556 lmb_region_max_pfn;
4557- int i;
4558+ unsigned int i;
4559
4560 for (i = 0; i < lmb.memory.cnt - 1; i++) {
4561 lmb_region_max_pfn =
4562diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4563index 0d957a4..26d968f 100644
4564--- a/arch/powerpc/mm/mmap_64.c
4565+++ b/arch/powerpc/mm/mmap_64.c
4566@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4567 */
4568 if (mmap_is_legacy()) {
4569 mm->mmap_base = TASK_UNMAPPED_BASE;
4570+
4571+#ifdef CONFIG_PAX_RANDMMAP
4572+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4573+ mm->mmap_base += mm->delta_mmap;
4574+#endif
4575+
4576 mm->get_unmapped_area = arch_get_unmapped_area;
4577 mm->unmap_area = arch_unmap_area;
4578 } else {
4579 mm->mmap_base = mmap_base();
4580+
4581+#ifdef CONFIG_PAX_RANDMMAP
4582+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4583+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4584+#endif
4585+
4586 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4587 mm->unmap_area = arch_unmap_area_topdown;
4588 }
4589diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4590index ba51948..23009d9 100644
4591--- a/arch/powerpc/mm/slice.c
4592+++ b/arch/powerpc/mm/slice.c
4593@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4594 if ((mm->task_size - len) < addr)
4595 return 0;
4596 vma = find_vma(mm, addr);
4597- return (!vma || (addr + len) <= vma->vm_start);
4598+ return check_heap_stack_gap(vma, addr, len);
4599 }
4600
4601 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4602@@ -256,7 +256,7 @@ full_search:
4603 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4604 continue;
4605 }
4606- if (!vma || addr + len <= vma->vm_start) {
4607+ if (check_heap_stack_gap(vma, addr, len)) {
4608 /*
4609 * Remember the place where we stopped the search:
4610 */
4611@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4612 }
4613 }
4614
4615- addr = mm->mmap_base;
4616- while (addr > len) {
4617+ if (mm->mmap_base < len)
4618+ addr = -ENOMEM;
4619+ else
4620+ addr = mm->mmap_base - len;
4621+
4622+ while (!IS_ERR_VALUE(addr)) {
4623 /* Go down by chunk size */
4624- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4625+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4626
4627 /* Check for hit with different page size */
4628 mask = slice_range_to_mask(addr, len);
4629@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4630 * return with success:
4631 */
4632 vma = find_vma(mm, addr);
4633- if (!vma || (addr + len) <= vma->vm_start) {
4634+ if (check_heap_stack_gap(vma, addr, len)) {
4635 /* remember the address as a hint for next time */
4636 if (use_cache)
4637 mm->free_area_cache = addr;
4638@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4639 mm->cached_hole_size = vma->vm_start - addr;
4640
4641 /* try just below the current vma->vm_start */
4642- addr = vma->vm_start;
4643+ addr = skip_heap_stack_gap(vma, len);
4644 }
4645
4646 /*
4647@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4648 if (fixed && addr > (mm->task_size - len))
4649 return -EINVAL;
4650
4651+#ifdef CONFIG_PAX_RANDMMAP
4652+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4653+ addr = 0;
4654+#endif
4655+
4656 /* If hint, make sure it matches our alignment restrictions */
4657 if (!fixed && addr) {
4658 addr = _ALIGN_UP(addr, 1ul << pshift);
4659diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
4660index b5c753d..8f01abe 100644
4661--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
4662+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
4663@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
4664 lite5200_pm_target_state = PM_SUSPEND_ON;
4665 }
4666
4667-static struct platform_suspend_ops lite5200_pm_ops = {
4668+static const struct platform_suspend_ops lite5200_pm_ops = {
4669 .valid = lite5200_pm_valid,
4670 .begin = lite5200_pm_begin,
4671 .prepare = lite5200_pm_prepare,
4672diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4673index a55b0b6..478c18e 100644
4674--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4675+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4676@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
4677 iounmap(mbar);
4678 }
4679
4680-static struct platform_suspend_ops mpc52xx_pm_ops = {
4681+static const struct platform_suspend_ops mpc52xx_pm_ops = {
4682 .valid = mpc52xx_pm_valid,
4683 .prepare = mpc52xx_pm_prepare,
4684 .enter = mpc52xx_pm_enter,
4685diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
4686index 08e65fc..643d3ac 100644
4687--- a/arch/powerpc/platforms/83xx/suspend.c
4688+++ b/arch/powerpc/platforms/83xx/suspend.c
4689@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
4690 return ret;
4691 }
4692
4693-static struct platform_suspend_ops mpc83xx_suspend_ops = {
4694+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
4695 .valid = mpc83xx_suspend_valid,
4696 .begin = mpc83xx_suspend_begin,
4697 .enter = mpc83xx_suspend_enter,
4698diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
4699index ca5bfdf..1602e09 100644
4700--- a/arch/powerpc/platforms/cell/iommu.c
4701+++ b/arch/powerpc/platforms/cell/iommu.c
4702@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
4703
4704 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
4705
4706-struct dma_map_ops dma_iommu_fixed_ops = {
4707+const struct dma_map_ops dma_iommu_fixed_ops = {
4708 .alloc_coherent = dma_fixed_alloc_coherent,
4709 .free_coherent = dma_fixed_free_coherent,
4710 .map_sg = dma_fixed_map_sg,
4711diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
4712index e34b305..20e48ec 100644
4713--- a/arch/powerpc/platforms/ps3/system-bus.c
4714+++ b/arch/powerpc/platforms/ps3/system-bus.c
4715@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
4716 return mask >= DMA_BIT_MASK(32);
4717 }
4718
4719-static struct dma_map_ops ps3_sb_dma_ops = {
4720+static const struct dma_map_ops ps3_sb_dma_ops = {
4721 .alloc_coherent = ps3_alloc_coherent,
4722 .free_coherent = ps3_free_coherent,
4723 .map_sg = ps3_sb_map_sg,
4724@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
4725 .unmap_page = ps3_unmap_page,
4726 };
4727
4728-static struct dma_map_ops ps3_ioc0_dma_ops = {
4729+static const struct dma_map_ops ps3_ioc0_dma_ops = {
4730 .alloc_coherent = ps3_alloc_coherent,
4731 .free_coherent = ps3_free_coherent,
4732 .map_sg = ps3_ioc0_map_sg,
4733diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
4734index f0e6f28..60d53ed 100644
4735--- a/arch/powerpc/platforms/pseries/Kconfig
4736+++ b/arch/powerpc/platforms/pseries/Kconfig
4737@@ -2,6 +2,8 @@ config PPC_PSERIES
4738 depends on PPC64 && PPC_BOOK3S
4739 bool "IBM pSeries & new (POWER5-based) iSeries"
4740 select MPIC
4741+ select PCI_MSI
4742+ select XICS
4743 select PPC_I8259
4744 select PPC_RTAS
4745 select RTAS_ERROR_LOGGING
4746diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
4747index 43c0aca..42c045b 100644
4748--- a/arch/s390/Kconfig
4749+++ b/arch/s390/Kconfig
4750@@ -194,28 +194,26 @@ config AUDIT_ARCH
4751
4752 config S390_SWITCH_AMODE
4753 bool "Switch kernel/user addressing modes"
4754+ default y
4755 help
4756 This option allows to switch the addressing modes of kernel and user
4757- space. The kernel parameter switch_amode=on will enable this feature,
4758- default is disabled. Enabling this (via kernel parameter) on machines
4759- earlier than IBM System z9-109 EC/BC will reduce system performance.
4760+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
4761+ will reduce system performance.
4762
4763 Note that this option will also be selected by selecting the execute
4764- protection option below. Enabling the execute protection via the
4765- noexec kernel parameter will also switch the addressing modes,
4766- independent of the switch_amode kernel parameter.
4767+ protection option below. Enabling the execute protection will also
4768+ switch the addressing modes, independent of this option.
4769
4770
4771 config S390_EXEC_PROTECT
4772 bool "Data execute protection"
4773+ default y
4774 select S390_SWITCH_AMODE
4775 help
4776 This option allows to enable a buffer overflow protection for user
4777 space programs and it also selects the addressing mode option above.
4778- The kernel parameter noexec=on will enable this feature and also
4779- switch the addressing modes, default is disabled. Enabling this (via
4780- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4781- will reduce system performance.
4782+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
4783+ reduce system performance.
4784
4785 comment "Code generation options"
4786
4787diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4788index ae7c8f9..3f01a0c 100644
4789--- a/arch/s390/include/asm/atomic.h
4790+++ b/arch/s390/include/asm/atomic.h
4791@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4792 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4793 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4794
4795+#define atomic64_read_unchecked(v) atomic64_read(v)
4796+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4797+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4798+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4799+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4800+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4801+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4802+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4803+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4804+
4805 #define smp_mb__before_atomic_dec() smp_mb()
4806 #define smp_mb__after_atomic_dec() smp_mb()
4807 #define smp_mb__before_atomic_inc() smp_mb()
4808diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4809index 9b86681..c5140db 100644
4810--- a/arch/s390/include/asm/cache.h
4811+++ b/arch/s390/include/asm/cache.h
4812@@ -11,8 +11,10 @@
4813 #ifndef __ARCH_S390_CACHE_H
4814 #define __ARCH_S390_CACHE_H
4815
4816-#define L1_CACHE_BYTES 256
4817+#include <linux/const.h>
4818+
4819 #define L1_CACHE_SHIFT 8
4820+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4821
4822 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4823
4824diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4825index e885442..e3a2817 100644
4826--- a/arch/s390/include/asm/elf.h
4827+++ b/arch/s390/include/asm/elf.h
4828@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4829 that it will "exec", and that there is sufficient room for the brk. */
4830 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4831
4832+#ifdef CONFIG_PAX_ASLR
4833+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4834+
4835+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4836+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4837+#endif
4838+
4839 /* This yields a mask that user programs can use to figure out what
4840 instruction set this CPU supports. */
4841
4842diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4843index e37478e..9ce0e9f 100644
4844--- a/arch/s390/include/asm/setup.h
4845+++ b/arch/s390/include/asm/setup.h
4846@@ -50,13 +50,13 @@ extern unsigned long memory_end;
4847 void detect_memory_layout(struct mem_chunk chunk[]);
4848
4849 #ifdef CONFIG_S390_SWITCH_AMODE
4850-extern unsigned int switch_amode;
4851+#define switch_amode (1)
4852 #else
4853 #define switch_amode (0)
4854 #endif
4855
4856 #ifdef CONFIG_S390_EXEC_PROTECT
4857-extern unsigned int s390_noexec;
4858+#define s390_noexec (1)
4859 #else
4860 #define s390_noexec (0)
4861 #endif
4862diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4863index 8377e91..e28e6f1 100644
4864--- a/arch/s390/include/asm/uaccess.h
4865+++ b/arch/s390/include/asm/uaccess.h
4866@@ -232,6 +232,10 @@ static inline unsigned long __must_check
4867 copy_to_user(void __user *to, const void *from, unsigned long n)
4868 {
4869 might_fault();
4870+
4871+ if ((long)n < 0)
4872+ return n;
4873+
4874 if (access_ok(VERIFY_WRITE, to, n))
4875 n = __copy_to_user(to, from, n);
4876 return n;
4877@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4878 static inline unsigned long __must_check
4879 __copy_from_user(void *to, const void __user *from, unsigned long n)
4880 {
4881+ if ((long)n < 0)
4882+ return n;
4883+
4884 if (__builtin_constant_p(n) && (n <= 256))
4885 return uaccess.copy_from_user_small(n, from, to);
4886 else
4887@@ -283,6 +290,10 @@ static inline unsigned long __must_check
4888 copy_from_user(void *to, const void __user *from, unsigned long n)
4889 {
4890 might_fault();
4891+
4892+ if ((long)n < 0)
4893+ return n;
4894+
4895 if (access_ok(VERIFY_READ, from, n))
4896 n = __copy_from_user(to, from, n);
4897 else
4898diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4899index 639380a..72e3c02 100644
4900--- a/arch/s390/kernel/module.c
4901+++ b/arch/s390/kernel/module.c
4902@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4903
4904 /* Increase core size by size of got & plt and set start
4905 offsets for got and plt. */
4906- me->core_size = ALIGN(me->core_size, 4);
4907- me->arch.got_offset = me->core_size;
4908- me->core_size += me->arch.got_size;
4909- me->arch.plt_offset = me->core_size;
4910- me->core_size += me->arch.plt_size;
4911+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4912+ me->arch.got_offset = me->core_size_rw;
4913+ me->core_size_rw += me->arch.got_size;
4914+ me->arch.plt_offset = me->core_size_rx;
4915+ me->core_size_rx += me->arch.plt_size;
4916 return 0;
4917 }
4918
4919@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4920 if (info->got_initialized == 0) {
4921 Elf_Addr *gotent;
4922
4923- gotent = me->module_core + me->arch.got_offset +
4924+ gotent = me->module_core_rw + me->arch.got_offset +
4925 info->got_offset;
4926 *gotent = val;
4927 info->got_initialized = 1;
4928@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4929 else if (r_type == R_390_GOTENT ||
4930 r_type == R_390_GOTPLTENT)
4931 *(unsigned int *) loc =
4932- (val + (Elf_Addr) me->module_core - loc) >> 1;
4933+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4934 else if (r_type == R_390_GOT64 ||
4935 r_type == R_390_GOTPLT64)
4936 *(unsigned long *) loc = val;
4937@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4938 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4939 if (info->plt_initialized == 0) {
4940 unsigned int *ip;
4941- ip = me->module_core + me->arch.plt_offset +
4942+ ip = me->module_core_rx + me->arch.plt_offset +
4943 info->plt_offset;
4944 #ifndef CONFIG_64BIT
4945 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4946@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4947 val - loc + 0xffffUL < 0x1ffffeUL) ||
4948 (r_type == R_390_PLT32DBL &&
4949 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4950- val = (Elf_Addr) me->module_core +
4951+ val = (Elf_Addr) me->module_core_rx +
4952 me->arch.plt_offset +
4953 info->plt_offset;
4954 val += rela->r_addend - loc;
4955@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4956 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4957 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4958 val = val + rela->r_addend -
4959- ((Elf_Addr) me->module_core + me->arch.got_offset);
4960+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4961 if (r_type == R_390_GOTOFF16)
4962 *(unsigned short *) loc = val;
4963 else if (r_type == R_390_GOTOFF32)
4964@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4965 break;
4966 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4967 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4968- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4969+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4970 rela->r_addend - loc;
4971 if (r_type == R_390_GOTPC)
4972 *(unsigned int *) loc = val;
4973diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4974index 0b2573a..71a22ec 100644
4975--- a/arch/s390/kernel/setup.c
4976+++ b/arch/s390/kernel/setup.c
4977@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4978 early_param("mem", early_parse_mem);
4979
4980 #ifdef CONFIG_S390_SWITCH_AMODE
4981-unsigned int switch_amode = 0;
4982-EXPORT_SYMBOL_GPL(switch_amode);
4983-
4984 static int set_amode_and_uaccess(unsigned long user_amode,
4985 unsigned long user32_amode)
4986 {
4987@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4988 return 0;
4989 }
4990 }
4991-
4992-/*
4993- * Switch kernel/user addressing modes?
4994- */
4995-static int __init early_parse_switch_amode(char *p)
4996-{
4997- switch_amode = 1;
4998- return 0;
4999-}
5000-early_param("switch_amode", early_parse_switch_amode);
5001-
5002 #else /* CONFIG_S390_SWITCH_AMODE */
5003 static inline int set_amode_and_uaccess(unsigned long user_amode,
5004 unsigned long user32_amode)
5005@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
5006 }
5007 #endif /* CONFIG_S390_SWITCH_AMODE */
5008
5009-#ifdef CONFIG_S390_EXEC_PROTECT
5010-unsigned int s390_noexec = 0;
5011-EXPORT_SYMBOL_GPL(s390_noexec);
5012-
5013-/*
5014- * Enable execute protection?
5015- */
5016-static int __init early_parse_noexec(char *p)
5017-{
5018- if (!strncmp(p, "off", 3))
5019- return 0;
5020- switch_amode = 1;
5021- s390_noexec = 1;
5022- return 0;
5023-}
5024-early_param("noexec", early_parse_noexec);
5025-#endif /* CONFIG_S390_EXEC_PROTECT */
5026-
5027 static void setup_addressing_mode(void)
5028 {
5029 if (s390_noexec) {
5030diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5031index 0ab74ae..c8b68f9 100644
5032--- a/arch/s390/mm/mmap.c
5033+++ b/arch/s390/mm/mmap.c
5034@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5035 */
5036 if (mmap_is_legacy()) {
5037 mm->mmap_base = TASK_UNMAPPED_BASE;
5038+
5039+#ifdef CONFIG_PAX_RANDMMAP
5040+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5041+ mm->mmap_base += mm->delta_mmap;
5042+#endif
5043+
5044 mm->get_unmapped_area = arch_get_unmapped_area;
5045 mm->unmap_area = arch_unmap_area;
5046 } else {
5047 mm->mmap_base = mmap_base();
5048+
5049+#ifdef CONFIG_PAX_RANDMMAP
5050+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5051+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5052+#endif
5053+
5054 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5055 mm->unmap_area = arch_unmap_area_topdown;
5056 }
5057@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5058 */
5059 if (mmap_is_legacy()) {
5060 mm->mmap_base = TASK_UNMAPPED_BASE;
5061+
5062+#ifdef CONFIG_PAX_RANDMMAP
5063+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5064+ mm->mmap_base += mm->delta_mmap;
5065+#endif
5066+
5067 mm->get_unmapped_area = s390_get_unmapped_area;
5068 mm->unmap_area = arch_unmap_area;
5069 } else {
5070 mm->mmap_base = mmap_base();
5071+
5072+#ifdef CONFIG_PAX_RANDMMAP
5073+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5074+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5075+#endif
5076+
5077 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5078 mm->unmap_area = arch_unmap_area_topdown;
5079 }
5080diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5081index ae3d59f..f65f075 100644
5082--- a/arch/score/include/asm/cache.h
5083+++ b/arch/score/include/asm/cache.h
5084@@ -1,7 +1,9 @@
5085 #ifndef _ASM_SCORE_CACHE_H
5086 #define _ASM_SCORE_CACHE_H
5087
5088+#include <linux/const.h>
5089+
5090 #define L1_CACHE_SHIFT 4
5091-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5092+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5093
5094 #endif /* _ASM_SCORE_CACHE_H */
5095diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
5096index 589d5c7..669e274 100644
5097--- a/arch/score/include/asm/system.h
5098+++ b/arch/score/include/asm/system.h
5099@@ -17,7 +17,7 @@ do { \
5100 #define finish_arch_switch(prev) do {} while (0)
5101
5102 typedef void (*vi_handler_t)(void);
5103-extern unsigned long arch_align_stack(unsigned long sp);
5104+#define arch_align_stack(x) (x)
5105
5106 #define mb() barrier()
5107 #define rmb() barrier()
5108diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5109index 25d0803..d6c8e36 100644
5110--- a/arch/score/kernel/process.c
5111+++ b/arch/score/kernel/process.c
5112@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5113
5114 return task_pt_regs(task)->cp0_epc;
5115 }
5116-
5117-unsigned long arch_align_stack(unsigned long sp)
5118-{
5119- return sp;
5120-}
5121diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
5122index d936c1a..304a252 100644
5123--- a/arch/sh/boards/mach-hp6xx/pm.c
5124+++ b/arch/sh/boards/mach-hp6xx/pm.c
5125@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
5126 return 0;
5127 }
5128
5129-static struct platform_suspend_ops hp6x0_pm_ops = {
5130+static const struct platform_suspend_ops hp6x0_pm_ops = {
5131 .enter = hp6x0_pm_enter,
5132 .valid = suspend_valid_only_mem,
5133 };
5134diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5135index 02df18e..ae3a793 100644
5136--- a/arch/sh/include/asm/cache.h
5137+++ b/arch/sh/include/asm/cache.h
5138@@ -9,10 +9,11 @@
5139 #define __ASM_SH_CACHE_H
5140 #ifdef __KERNEL__
5141
5142+#include <linux/const.h>
5143 #include <linux/init.h>
5144 #include <cpu/cache.h>
5145
5146-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5147+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5148
5149 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
5150
5151diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
5152index 8a8a993..7b3079b 100644
5153--- a/arch/sh/kernel/cpu/sh4/sq.c
5154+++ b/arch/sh/kernel/cpu/sh4/sq.c
5155@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
5156 NULL,
5157 };
5158
5159-static struct sysfs_ops sq_sysfs_ops = {
5160+static const struct sysfs_ops sq_sysfs_ops = {
5161 .show = sq_sysfs_show,
5162 .store = sq_sysfs_store,
5163 };
5164diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
5165index ee3c2aa..c49cee6 100644
5166--- a/arch/sh/kernel/cpu/shmobile/pm.c
5167+++ b/arch/sh/kernel/cpu/shmobile/pm.c
5168@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
5169 return 0;
5170 }
5171
5172-static struct platform_suspend_ops sh_pm_ops = {
5173+static const struct platform_suspend_ops sh_pm_ops = {
5174 .enter = sh_pm_enter,
5175 .valid = suspend_valid_only_mem,
5176 };
5177diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
5178index 3e532d0..9faa306 100644
5179--- a/arch/sh/kernel/kgdb.c
5180+++ b/arch/sh/kernel/kgdb.c
5181@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
5182 {
5183 }
5184
5185-struct kgdb_arch arch_kgdb_ops = {
5186+const struct kgdb_arch arch_kgdb_ops = {
5187 /* Breakpoint instruction: trapa #0x3c */
5188 #ifdef CONFIG_CPU_LITTLE_ENDIAN
5189 .gdb_bpt_instr = { 0x3c, 0xc3 },
5190diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5191index afeb710..d1d1289 100644
5192--- a/arch/sh/mm/mmap.c
5193+++ b/arch/sh/mm/mmap.c
5194@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5195 addr = PAGE_ALIGN(addr);
5196
5197 vma = find_vma(mm, addr);
5198- if (TASK_SIZE - len >= addr &&
5199- (!vma || addr + len <= vma->vm_start))
5200+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5201 return addr;
5202 }
5203
5204@@ -106,7 +105,7 @@ full_search:
5205 }
5206 return -ENOMEM;
5207 }
5208- if (likely(!vma || addr + len <= vma->vm_start)) {
5209+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5210 /*
5211 * Remember the place where we stopped the search:
5212 */
5213@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5214 addr = PAGE_ALIGN(addr);
5215
5216 vma = find_vma(mm, addr);
5217- if (TASK_SIZE - len >= addr &&
5218- (!vma || addr + len <= vma->vm_start))
5219+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5220 return addr;
5221 }
5222
5223@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5224 /* make sure it can fit in the remaining address space */
5225 if (likely(addr > len)) {
5226 vma = find_vma(mm, addr-len);
5227- if (!vma || addr <= vma->vm_start) {
5228+ if (check_heap_stack_gap(vma, addr - len, len)) {
5229 /* remember the address as a hint for next time */
5230 return (mm->free_area_cache = addr-len);
5231 }
5232@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5233 if (unlikely(mm->mmap_base < len))
5234 goto bottomup;
5235
5236- addr = mm->mmap_base-len;
5237- if (do_colour_align)
5238- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5239+ addr = mm->mmap_base - len;
5240
5241 do {
5242+ if (do_colour_align)
5243+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5244 /*
5245 * Lookup failure means no vma is above this address,
5246 * else if new region fits below vma->vm_start,
5247 * return with success:
5248 */
5249 vma = find_vma(mm, addr);
5250- if (likely(!vma || addr+len <= vma->vm_start)) {
5251+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5252 /* remember the address as a hint for next time */
5253 return (mm->free_area_cache = addr);
5254 }
5255@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5256 mm->cached_hole_size = vma->vm_start - addr;
5257
5258 /* try just below the current vma->vm_start */
5259- addr = vma->vm_start-len;
5260- if (do_colour_align)
5261- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5262- } while (likely(len < vma->vm_start));
5263+ addr = skip_heap_stack_gap(vma, len);
5264+ } while (!IS_ERR_VALUE(addr));
5265
5266 bottomup:
5267 /*
5268diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
5269index 05ef538..dc9c857 100644
5270--- a/arch/sparc/Kconfig
5271+++ b/arch/sparc/Kconfig
5272@@ -32,6 +32,7 @@ config SPARC
5273
5274 config SPARC32
5275 def_bool !64BIT
5276+ select GENERIC_ATOMIC64
5277
5278 config SPARC64
5279 def_bool 64BIT
5280diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5281index 113225b..7fd04e7 100644
5282--- a/arch/sparc/Makefile
5283+++ b/arch/sparc/Makefile
5284@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5285 # Export what is needed by arch/sparc/boot/Makefile
5286 export VMLINUX_INIT VMLINUX_MAIN
5287 VMLINUX_INIT := $(head-y) $(init-y)
5288-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5289+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5290 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5291 VMLINUX_MAIN += $(drivers-y) $(net-y)
5292
5293diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
5294index f0d343c..cf36e68 100644
5295--- a/arch/sparc/include/asm/atomic_32.h
5296+++ b/arch/sparc/include/asm/atomic_32.h
5297@@ -13,6 +13,8 @@
5298
5299 #include <linux/types.h>
5300
5301+#include <asm-generic/atomic64.h>
5302+
5303 #ifdef __KERNEL__
5304
5305 #include <asm/system.h>
5306diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5307index f5cc06f..f858d47 100644
5308--- a/arch/sparc/include/asm/atomic_64.h
5309+++ b/arch/sparc/include/asm/atomic_64.h
5310@@ -14,18 +14,40 @@
5311 #define ATOMIC64_INIT(i) { (i) }
5312
5313 #define atomic_read(v) ((v)->counter)
5314+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5315+{
5316+ return v->counter;
5317+}
5318 #define atomic64_read(v) ((v)->counter)
5319+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5320+{
5321+ return v->counter;
5322+}
5323
5324 #define atomic_set(v, i) (((v)->counter) = i)
5325+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5326+{
5327+ v->counter = i;
5328+}
5329 #define atomic64_set(v, i) (((v)->counter) = i)
5330+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5331+{
5332+ v->counter = i;
5333+}
5334
5335 extern void atomic_add(int, atomic_t *);
5336+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5337 extern void atomic64_add(long, atomic64_t *);
5338+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5339 extern void atomic_sub(int, atomic_t *);
5340+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5341 extern void atomic64_sub(long, atomic64_t *);
5342+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5343
5344 extern int atomic_add_ret(int, atomic_t *);
5345+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5346 extern long atomic64_add_ret(long, atomic64_t *);
5347+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5348 extern int atomic_sub_ret(int, atomic_t *);
5349 extern long atomic64_sub_ret(long, atomic64_t *);
5350
5351@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5352 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5353
5354 #define atomic_inc_return(v) atomic_add_ret(1, v)
5355+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5356+{
5357+ return atomic_add_ret_unchecked(1, v);
5358+}
5359 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5360+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5361+{
5362+ return atomic64_add_ret_unchecked(1, v);
5363+}
5364
5365 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5366 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5367
5368 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5369+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5370+{
5371+ return atomic_add_ret_unchecked(i, v);
5372+}
5373 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5374+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5375+{
5376+ return atomic64_add_ret_unchecked(i, v);
5377+}
5378
5379 /*
5380 * atomic_inc_and_test - increment and test
5381@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5382 * other cases.
5383 */
5384 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5385+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5386+{
5387+ return atomic_inc_return_unchecked(v) == 0;
5388+}
5389 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5390
5391 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5392@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5393 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5394
5395 #define atomic_inc(v) atomic_add(1, v)
5396+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5397+{
5398+ atomic_add_unchecked(1, v);
5399+}
5400 #define atomic64_inc(v) atomic64_add(1, v)
5401+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5402+{
5403+ atomic64_add_unchecked(1, v);
5404+}
5405
5406 #define atomic_dec(v) atomic_sub(1, v)
5407+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5408+{
5409+ atomic_sub_unchecked(1, v);
5410+}
5411 #define atomic64_dec(v) atomic64_sub(1, v)
5412+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5413+{
5414+ atomic64_sub_unchecked(1, v);
5415+}
5416
5417 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5418 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5419
5420 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5421+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5422+{
5423+ return cmpxchg(&v->counter, old, new);
5424+}
5425 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5426+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5427+{
5428+ return xchg(&v->counter, new);
5429+}
5430
5431 static inline int atomic_add_unless(atomic_t *v, int a, int u)
5432 {
5433- int c, old;
5434+ int c, old, new;
5435 c = atomic_read(v);
5436 for (;;) {
5437- if (unlikely(c == (u)))
5438+ if (unlikely(c == u))
5439 break;
5440- old = atomic_cmpxchg((v), c, c + (a));
5441+
5442+ asm volatile("addcc %2, %0, %0\n"
5443+
5444+#ifdef CONFIG_PAX_REFCOUNT
5445+ "tvs %%icc, 6\n"
5446+#endif
5447+
5448+ : "=r" (new)
5449+ : "0" (c), "ir" (a)
5450+ : "cc");
5451+
5452+ old = atomic_cmpxchg(v, c, new);
5453 if (likely(old == c))
5454 break;
5455 c = old;
5456 }
5457- return c != (u);
5458+ return c != u;
5459 }
5460
5461 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
5462@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
5463 #define atomic64_cmpxchg(v, o, n) \
5464 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5465 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5466+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5467+{
5468+ return xchg(&v->counter, new);
5469+}
5470
5471 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5472 {
5473- long c, old;
5474+ long c, old, new;
5475 c = atomic64_read(v);
5476 for (;;) {
5477- if (unlikely(c == (u)))
5478+ if (unlikely(c == u))
5479 break;
5480- old = atomic64_cmpxchg((v), c, c + (a));
5481+
5482+ asm volatile("addcc %2, %0, %0\n"
5483+
5484+#ifdef CONFIG_PAX_REFCOUNT
5485+ "tvs %%xcc, 6\n"
5486+#endif
5487+
5488+ : "=r" (new)
5489+ : "0" (c), "ir" (a)
5490+ : "cc");
5491+
5492+ old = atomic64_cmpxchg(v, c, new);
5493 if (likely(old == c))
5494 break;
5495 c = old;
5496 }
5497- return c != (u);
5498+ return c != u;
5499 }
5500
5501 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5502diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5503index 41f85ae..73b80b5 100644
5504--- a/arch/sparc/include/asm/cache.h
5505+++ b/arch/sparc/include/asm/cache.h
5506@@ -7,8 +7,10 @@
5507 #ifndef _SPARC_CACHE_H
5508 #define _SPARC_CACHE_H
5509
5510+#include <linux/const.h>
5511+
5512 #define L1_CACHE_SHIFT 5
5513-#define L1_CACHE_BYTES 32
5514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5515 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
5516
5517 #ifdef CONFIG_SPARC32
5518diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
5519index 5a8c308..38def92 100644
5520--- a/arch/sparc/include/asm/dma-mapping.h
5521+++ b/arch/sparc/include/asm/dma-mapping.h
5522@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
5523 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
5524 #define dma_is_consistent(d, h) (1)
5525
5526-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
5527+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
5528 extern struct bus_type pci_bus_type;
5529
5530-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5531+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
5532 {
5533 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
5534 if (dev->bus == &pci_bus_type)
5535@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5536 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5537 dma_addr_t *dma_handle, gfp_t flag)
5538 {
5539- struct dma_map_ops *ops = get_dma_ops(dev);
5540+ const struct dma_map_ops *ops = get_dma_ops(dev);
5541 void *cpu_addr;
5542
5543 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
5544@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5545 static inline void dma_free_coherent(struct device *dev, size_t size,
5546 void *cpu_addr, dma_addr_t dma_handle)
5547 {
5548- struct dma_map_ops *ops = get_dma_ops(dev);
5549+ const struct dma_map_ops *ops = get_dma_ops(dev);
5550
5551 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
5552 ops->free_coherent(dev, size, cpu_addr, dma_handle);
5553diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5554index 381a1b5..b97e3ff 100644
5555--- a/arch/sparc/include/asm/elf_32.h
5556+++ b/arch/sparc/include/asm/elf_32.h
5557@@ -116,6 +116,13 @@ typedef struct {
5558
5559 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5560
5561+#ifdef CONFIG_PAX_ASLR
5562+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5563+
5564+#define PAX_DELTA_MMAP_LEN 16
5565+#define PAX_DELTA_STACK_LEN 16
5566+#endif
5567+
5568 /* This yields a mask that user programs can use to figure out what
5569 instruction set this cpu supports. This can NOT be done in userspace
5570 on Sparc. */
5571diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5572index 9968085..c2106ef 100644
5573--- a/arch/sparc/include/asm/elf_64.h
5574+++ b/arch/sparc/include/asm/elf_64.h
5575@@ -163,6 +163,12 @@ typedef struct {
5576 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5577 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5578
5579+#ifdef CONFIG_PAX_ASLR
5580+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5581+
5582+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5583+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5584+#endif
5585
5586 /* This yields a mask that user programs can use to figure out what
5587 instruction set this cpu supports. */
5588diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
5589index 156707b..aefa786 100644
5590--- a/arch/sparc/include/asm/page_32.h
5591+++ b/arch/sparc/include/asm/page_32.h
5592@@ -8,6 +8,8 @@
5593 #ifndef _SPARC_PAGE_H
5594 #define _SPARC_PAGE_H
5595
5596+#include <linux/const.h>
5597+
5598 #define PAGE_SHIFT 12
5599
5600 #ifndef __ASSEMBLY__
5601diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5602index e0cabe7..efd60f1 100644
5603--- a/arch/sparc/include/asm/pgtable_32.h
5604+++ b/arch/sparc/include/asm/pgtable_32.h
5605@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5606 BTFIXUPDEF_INT(page_none)
5607 BTFIXUPDEF_INT(page_copy)
5608 BTFIXUPDEF_INT(page_readonly)
5609+
5610+#ifdef CONFIG_PAX_PAGEEXEC
5611+BTFIXUPDEF_INT(page_shared_noexec)
5612+BTFIXUPDEF_INT(page_copy_noexec)
5613+BTFIXUPDEF_INT(page_readonly_noexec)
5614+#endif
5615+
5616 BTFIXUPDEF_INT(page_kernel)
5617
5618 #define PMD_SHIFT SUN4C_PMD_SHIFT
5619@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
5620 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5621 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5622
5623+#ifdef CONFIG_PAX_PAGEEXEC
5624+extern pgprot_t PAGE_SHARED_NOEXEC;
5625+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5626+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5627+#else
5628+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5629+# define PAGE_COPY_NOEXEC PAGE_COPY
5630+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5631+#endif
5632+
5633 extern unsigned long page_kernel;
5634
5635 #ifdef MODULE
5636diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5637index 1407c07..7e10231 100644
5638--- a/arch/sparc/include/asm/pgtsrmmu.h
5639+++ b/arch/sparc/include/asm/pgtsrmmu.h
5640@@ -115,6 +115,13 @@
5641 SRMMU_EXEC | SRMMU_REF)
5642 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5643 SRMMU_EXEC | SRMMU_REF)
5644+
5645+#ifdef CONFIG_PAX_PAGEEXEC
5646+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5647+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5648+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5649+#endif
5650+
5651 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5652 SRMMU_DIRTY | SRMMU_REF)
5653
5654diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5655index 43e5147..47622a1 100644
5656--- a/arch/sparc/include/asm/spinlock_64.h
5657+++ b/arch/sparc/include/asm/spinlock_64.h
5658@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
5659
5660 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5661
5662-static void inline arch_read_lock(raw_rwlock_t *lock)
5663+static inline void arch_read_lock(raw_rwlock_t *lock)
5664 {
5665 unsigned long tmp1, tmp2;
5666
5667 __asm__ __volatile__ (
5668 "1: ldsw [%2], %0\n"
5669 " brlz,pn %0, 2f\n"
5670-"4: add %0, 1, %1\n"
5671+"4: addcc %0, 1, %1\n"
5672+
5673+#ifdef CONFIG_PAX_REFCOUNT
5674+" tvs %%icc, 6\n"
5675+#endif
5676+
5677 " cas [%2], %0, %1\n"
5678 " cmp %0, %1\n"
5679 " bne,pn %%icc, 1b\n"
5680@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
5681 " .previous"
5682 : "=&r" (tmp1), "=&r" (tmp2)
5683 : "r" (lock)
5684- : "memory");
5685+ : "memory", "cc");
5686 }
5687
5688-static int inline arch_read_trylock(raw_rwlock_t *lock)
5689+static inline int arch_read_trylock(raw_rwlock_t *lock)
5690 {
5691 int tmp1, tmp2;
5692
5693@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5694 "1: ldsw [%2], %0\n"
5695 " brlz,a,pn %0, 2f\n"
5696 " mov 0, %0\n"
5697-" add %0, 1, %1\n"
5698+" addcc %0, 1, %1\n"
5699+
5700+#ifdef CONFIG_PAX_REFCOUNT
5701+" tvs %%icc, 6\n"
5702+#endif
5703+
5704 " cas [%2], %0, %1\n"
5705 " cmp %0, %1\n"
5706 " bne,pn %%icc, 1b\n"
5707@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5708 return tmp1;
5709 }
5710
5711-static void inline arch_read_unlock(raw_rwlock_t *lock)
5712+static inline void arch_read_unlock(raw_rwlock_t *lock)
5713 {
5714 unsigned long tmp1, tmp2;
5715
5716 __asm__ __volatile__(
5717 "1: lduw [%2], %0\n"
5718-" sub %0, 1, %1\n"
5719+" subcc %0, 1, %1\n"
5720+
5721+#ifdef CONFIG_PAX_REFCOUNT
5722+" tvs %%icc, 6\n"
5723+#endif
5724+
5725 " cas [%2], %0, %1\n"
5726 " cmp %0, %1\n"
5727 " bne,pn %%xcc, 1b\n"
5728@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
5729 : "memory");
5730 }
5731
5732-static void inline arch_write_lock(raw_rwlock_t *lock)
5733+static inline void arch_write_lock(raw_rwlock_t *lock)
5734 {
5735 unsigned long mask, tmp1, tmp2;
5736
5737@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
5738 : "memory");
5739 }
5740
5741-static void inline arch_write_unlock(raw_rwlock_t *lock)
5742+static inline void arch_write_unlock(raw_rwlock_t *lock)
5743 {
5744 __asm__ __volatile__(
5745 " stw %%g0, [%0]"
5746@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
5747 : "memory");
5748 }
5749
5750-static int inline arch_write_trylock(raw_rwlock_t *lock)
5751+static inline int arch_write_trylock(raw_rwlock_t *lock)
5752 {
5753 unsigned long mask, tmp1, tmp2, result;
5754
5755diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5756index 844d73a..f787fb9 100644
5757--- a/arch/sparc/include/asm/thread_info_32.h
5758+++ b/arch/sparc/include/asm/thread_info_32.h
5759@@ -50,6 +50,8 @@ struct thread_info {
5760 unsigned long w_saved;
5761
5762 struct restart_block restart_block;
5763+
5764+ unsigned long lowest_stack;
5765 };
5766
5767 /*
5768diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5769index f78ad9a..9f55fc7 100644
5770--- a/arch/sparc/include/asm/thread_info_64.h
5771+++ b/arch/sparc/include/asm/thread_info_64.h
5772@@ -68,6 +68,8 @@ struct thread_info {
5773 struct pt_regs *kern_una_regs;
5774 unsigned int kern_una_insn;
5775
5776+ unsigned long lowest_stack;
5777+
5778 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5779 };
5780
5781diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5782index e88fbe5..96b0ce5 100644
5783--- a/arch/sparc/include/asm/uaccess.h
5784+++ b/arch/sparc/include/asm/uaccess.h
5785@@ -1,5 +1,13 @@
5786 #ifndef ___ASM_SPARC_UACCESS_H
5787 #define ___ASM_SPARC_UACCESS_H
5788+
5789+#ifdef __KERNEL__
5790+#ifndef __ASSEMBLY__
5791+#include <linux/types.h>
5792+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5793+#endif
5794+#endif
5795+
5796 #if defined(__sparc__) && defined(__arch64__)
5797 #include <asm/uaccess_64.h>
5798 #else
5799diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5800index 8303ac4..07f333d 100644
5801--- a/arch/sparc/include/asm/uaccess_32.h
5802+++ b/arch/sparc/include/asm/uaccess_32.h
5803@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5804
5805 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5806 {
5807- if (n && __access_ok((unsigned long) to, n))
5808+ if ((long)n < 0)
5809+ return n;
5810+
5811+ if (n && __access_ok((unsigned long) to, n)) {
5812+ if (!__builtin_constant_p(n))
5813+ check_object_size(from, n, true);
5814 return __copy_user(to, (__force void __user *) from, n);
5815- else
5816+ } else
5817 return n;
5818 }
5819
5820 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5821 {
5822+ if ((long)n < 0)
5823+ return n;
5824+
5825+ if (!__builtin_constant_p(n))
5826+ check_object_size(from, n, true);
5827+
5828 return __copy_user(to, (__force void __user *) from, n);
5829 }
5830
5831 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5832 {
5833- if (n && __access_ok((unsigned long) from, n))
5834+ if ((long)n < 0)
5835+ return n;
5836+
5837+ if (n && __access_ok((unsigned long) from, n)) {
5838+ if (!__builtin_constant_p(n))
5839+ check_object_size(to, n, false);
5840 return __copy_user((__force void __user *) to, from, n);
5841- else
5842+ } else
5843 return n;
5844 }
5845
5846 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5847 {
5848+ if ((long)n < 0)
5849+ return n;
5850+
5851 return __copy_user((__force void __user *) to, from, n);
5852 }
5853
5854diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5855index 9ea271e..7b8a271 100644
5856--- a/arch/sparc/include/asm/uaccess_64.h
5857+++ b/arch/sparc/include/asm/uaccess_64.h
5858@@ -9,6 +9,7 @@
5859 #include <linux/compiler.h>
5860 #include <linux/string.h>
5861 #include <linux/thread_info.h>
5862+#include <linux/kernel.h>
5863 #include <asm/asi.h>
5864 #include <asm/system.h>
5865 #include <asm/spitfire.h>
5866@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5867 static inline unsigned long __must_check
5868 copy_from_user(void *to, const void __user *from, unsigned long size)
5869 {
5870- unsigned long ret = ___copy_from_user(to, from, size);
5871+ unsigned long ret;
5872
5873+ if ((long)size < 0 || size > INT_MAX)
5874+ return size;
5875+
5876+ if (!__builtin_constant_p(size))
5877+ check_object_size(to, size, false);
5878+
5879+ ret = ___copy_from_user(to, from, size);
5880 if (unlikely(ret))
5881 ret = copy_from_user_fixup(to, from, size);
5882 return ret;
5883@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5884 static inline unsigned long __must_check
5885 copy_to_user(void __user *to, const void *from, unsigned long size)
5886 {
5887- unsigned long ret = ___copy_to_user(to, from, size);
5888+ unsigned long ret;
5889
5890+ if ((long)size < 0 || size > INT_MAX)
5891+ return size;
5892+
5893+ if (!__builtin_constant_p(size))
5894+ check_object_size(from, size, true);
5895+
5896+ ret = ___copy_to_user(to, from, size);
5897 if (unlikely(ret))
5898 ret = copy_to_user_fixup(to, from, size);
5899 return ret;
5900diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5901index 2782681..77ded84 100644
5902--- a/arch/sparc/kernel/Makefile
5903+++ b/arch/sparc/kernel/Makefile
5904@@ -3,7 +3,7 @@
5905 #
5906
5907 asflags-y := -ansi
5908-ccflags-y := -Werror
5909+#ccflags-y := -Werror
5910
5911 extra-y := head_$(BITS).o
5912 extra-y += init_task.o
5913diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5914index 7690cc2..ece64c9 100644
5915--- a/arch/sparc/kernel/iommu.c
5916+++ b/arch/sparc/kernel/iommu.c
5917@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5918 spin_unlock_irqrestore(&iommu->lock, flags);
5919 }
5920
5921-static struct dma_map_ops sun4u_dma_ops = {
5922+static const struct dma_map_ops sun4u_dma_ops = {
5923 .alloc_coherent = dma_4u_alloc_coherent,
5924 .free_coherent = dma_4u_free_coherent,
5925 .map_page = dma_4u_map_page,
5926@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5927 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5928 };
5929
5930-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5931+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5932 EXPORT_SYMBOL(dma_ops);
5933
5934 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5935diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5936index 9f61fd8..bd048db 100644
5937--- a/arch/sparc/kernel/ioport.c
5938+++ b/arch/sparc/kernel/ioport.c
5939@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5940 BUG();
5941 }
5942
5943-struct dma_map_ops sbus_dma_ops = {
5944+const struct dma_map_ops sbus_dma_ops = {
5945 .alloc_coherent = sbus_alloc_coherent,
5946 .free_coherent = sbus_free_coherent,
5947 .map_page = sbus_map_page,
5948@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5949 .sync_sg_for_device = sbus_sync_sg_for_device,
5950 };
5951
5952-struct dma_map_ops *dma_ops = &sbus_dma_ops;
5953+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5954 EXPORT_SYMBOL(dma_ops);
5955
5956 static int __init sparc_register_ioport(void)
5957@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5958 }
5959 }
5960
5961-struct dma_map_ops pci32_dma_ops = {
5962+const struct dma_map_ops pci32_dma_ops = {
5963 .alloc_coherent = pci32_alloc_coherent,
5964 .free_coherent = pci32_free_coherent,
5965 .map_page = pci32_map_page,
5966diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5967index 04df4ed..55c4b6e 100644
5968--- a/arch/sparc/kernel/kgdb_32.c
5969+++ b/arch/sparc/kernel/kgdb_32.c
5970@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5971 {
5972 }
5973
5974-struct kgdb_arch arch_kgdb_ops = {
5975+const struct kgdb_arch arch_kgdb_ops = {
5976 /* Breakpoint instruction: ta 0x7d */
5977 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5978 };
5979diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5980index f5a0fd4..d886f71 100644
5981--- a/arch/sparc/kernel/kgdb_64.c
5982+++ b/arch/sparc/kernel/kgdb_64.c
5983@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5984 {
5985 }
5986
5987-struct kgdb_arch arch_kgdb_ops = {
5988+const struct kgdb_arch arch_kgdb_ops = {
5989 /* Breakpoint instruction: ta 0x72 */
5990 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5991 };
5992diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5993index 23c33ff..d137fbd 100644
5994--- a/arch/sparc/kernel/pci_sun4v.c
5995+++ b/arch/sparc/kernel/pci_sun4v.c
5996@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5997 spin_unlock_irqrestore(&iommu->lock, flags);
5998 }
5999
6000-static struct dma_map_ops sun4v_dma_ops = {
6001+static const struct dma_map_ops sun4v_dma_ops = {
6002 .alloc_coherent = dma_4v_alloc_coherent,
6003 .free_coherent = dma_4v_free_coherent,
6004 .map_page = dma_4v_map_page,
6005diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6006index c49865b..b41a81b 100644
6007--- a/arch/sparc/kernel/process_32.c
6008+++ b/arch/sparc/kernel/process_32.c
6009@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
6010 rw->ins[4], rw->ins[5],
6011 rw->ins[6],
6012 rw->ins[7]);
6013- printk("%pS\n", (void *) rw->ins[7]);
6014+ printk("%pA\n", (void *) rw->ins[7]);
6015 rw = (struct reg_window32 *) rw->ins[6];
6016 }
6017 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
6018@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
6019
6020 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6021 r->psr, r->pc, r->npc, r->y, print_tainted());
6022- printk("PC: <%pS>\n", (void *) r->pc);
6023+ printk("PC: <%pA>\n", (void *) r->pc);
6024 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6025 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6026 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6027 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6028 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6029 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6030- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6031+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6032
6033 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6034 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6035@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6036 rw = (struct reg_window32 *) fp;
6037 pc = rw->ins[7];
6038 printk("[%08lx : ", pc);
6039- printk("%pS ] ", (void *) pc);
6040+ printk("%pA ] ", (void *) pc);
6041 fp = rw->ins[6];
6042 } while (++count < 16);
6043 printk("\n");
6044diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6045index cb70476..3d0c191 100644
6046--- a/arch/sparc/kernel/process_64.c
6047+++ b/arch/sparc/kernel/process_64.c
6048@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
6049 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6050 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6051 if (regs->tstate & TSTATE_PRIV)
6052- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6053+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6054 }
6055
6056 void show_regs(struct pt_regs *regs)
6057 {
6058 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6059 regs->tpc, regs->tnpc, regs->y, print_tainted());
6060- printk("TPC: <%pS>\n", (void *) regs->tpc);
6061+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6062 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6063 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6064 regs->u_regs[3]);
6065@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
6066 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6067 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6068 regs->u_regs[15]);
6069- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6070+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6071 show_regwindow(regs);
6072 }
6073
6074@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
6075 ((tp && tp->task) ? tp->task->pid : -1));
6076
6077 if (gp->tstate & TSTATE_PRIV) {
6078- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6079+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6080 (void *) gp->tpc,
6081 (void *) gp->o7,
6082 (void *) gp->i7,
6083diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
6084index 6edc4e5..06a69b4 100644
6085--- a/arch/sparc/kernel/sigutil_64.c
6086+++ b/arch/sparc/kernel/sigutil_64.c
6087@@ -2,6 +2,7 @@
6088 #include <linux/types.h>
6089 #include <linux/thread_info.h>
6090 #include <linux/uaccess.h>
6091+#include <linux/errno.h>
6092
6093 #include <asm/sigcontext.h>
6094 #include <asm/fpumacro.h>
6095diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6096index 3a82e65..ce0a53a 100644
6097--- a/arch/sparc/kernel/sys_sparc_32.c
6098+++ b/arch/sparc/kernel/sys_sparc_32.c
6099@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6100 if (ARCH_SUN4C && len > 0x20000000)
6101 return -ENOMEM;
6102 if (!addr)
6103- addr = TASK_UNMAPPED_BASE;
6104+ addr = current->mm->mmap_base;
6105
6106 if (flags & MAP_SHARED)
6107 addr = COLOUR_ALIGN(addr);
6108@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6109 }
6110 if (TASK_SIZE - PAGE_SIZE - len < addr)
6111 return -ENOMEM;
6112- if (!vmm || addr + len <= vmm->vm_start)
6113+ if (check_heap_stack_gap(vmm, addr, len))
6114 return addr;
6115 addr = vmm->vm_end;
6116 if (flags & MAP_SHARED)
6117diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6118index cfa0e19..98972ac 100644
6119--- a/arch/sparc/kernel/sys_sparc_64.c
6120+++ b/arch/sparc/kernel/sys_sparc_64.c
6121@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6122 /* We do not accept a shared mapping if it would violate
6123 * cache aliasing constraints.
6124 */
6125- if ((flags & MAP_SHARED) &&
6126+ if ((filp || (flags & MAP_SHARED)) &&
6127 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6128 return -EINVAL;
6129 return addr;
6130@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6131 if (filp || (flags & MAP_SHARED))
6132 do_color_align = 1;
6133
6134+#ifdef CONFIG_PAX_RANDMMAP
6135+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6136+#endif
6137+
6138 if (addr) {
6139 if (do_color_align)
6140 addr = COLOUR_ALIGN(addr, pgoff);
6141@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6142 addr = PAGE_ALIGN(addr);
6143
6144 vma = find_vma(mm, addr);
6145- if (task_size - len >= addr &&
6146- (!vma || addr + len <= vma->vm_start))
6147+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6148 return addr;
6149 }
6150
6151 if (len > mm->cached_hole_size) {
6152- start_addr = addr = mm->free_area_cache;
6153+ start_addr = addr = mm->free_area_cache;
6154 } else {
6155- start_addr = addr = TASK_UNMAPPED_BASE;
6156+ start_addr = addr = mm->mmap_base;
6157 mm->cached_hole_size = 0;
6158 }
6159
6160@@ -175,14 +178,14 @@ full_search:
6161 vma = find_vma(mm, VA_EXCLUDE_END);
6162 }
6163 if (unlikely(task_size < addr)) {
6164- if (start_addr != TASK_UNMAPPED_BASE) {
6165- start_addr = addr = TASK_UNMAPPED_BASE;
6166+ if (start_addr != mm->mmap_base) {
6167+ start_addr = addr = mm->mmap_base;
6168 mm->cached_hole_size = 0;
6169 goto full_search;
6170 }
6171 return -ENOMEM;
6172 }
6173- if (likely(!vma || addr + len <= vma->vm_start)) {
6174+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6175 /*
6176 * Remember the place where we stopped the search:
6177 */
6178@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6179 /* We do not accept a shared mapping if it would violate
6180 * cache aliasing constraints.
6181 */
6182- if ((flags & MAP_SHARED) &&
6183+ if ((filp || (flags & MAP_SHARED)) &&
6184 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6185 return -EINVAL;
6186 return addr;
6187@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6188 addr = PAGE_ALIGN(addr);
6189
6190 vma = find_vma(mm, addr);
6191- if (task_size - len >= addr &&
6192- (!vma || addr + len <= vma->vm_start))
6193+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6194 return addr;
6195 }
6196
6197@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6198 /* make sure it can fit in the remaining address space */
6199 if (likely(addr > len)) {
6200 vma = find_vma(mm, addr-len);
6201- if (!vma || addr <= vma->vm_start) {
6202+ if (check_heap_stack_gap(vma, addr - len, len)) {
6203 /* remember the address as a hint for next time */
6204 return (mm->free_area_cache = addr-len);
6205 }
6206@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6207 if (unlikely(mm->mmap_base < len))
6208 goto bottomup;
6209
6210- addr = mm->mmap_base-len;
6211- if (do_color_align)
6212- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6213+ addr = mm->mmap_base - len;
6214
6215 do {
6216+ if (do_color_align)
6217+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6218 /*
6219 * Lookup failure means no vma is above this address,
6220 * else if new region fits below vma->vm_start,
6221 * return with success:
6222 */
6223 vma = find_vma(mm, addr);
6224- if (likely(!vma || addr+len <= vma->vm_start)) {
6225+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6226 /* remember the address as a hint for next time */
6227 return (mm->free_area_cache = addr);
6228 }
6229@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6230 mm->cached_hole_size = vma->vm_start - addr;
6231
6232 /* try just below the current vma->vm_start */
6233- addr = vma->vm_start-len;
6234- if (do_color_align)
6235- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6236- } while (likely(len < vma->vm_start));
6237+ addr = skip_heap_stack_gap(vma, len);
6238+ } while (!IS_ERR_VALUE(addr));
6239
6240 bottomup:
6241 /*
6242@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6243 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
6244 sysctl_legacy_va_layout) {
6245 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6246+
6247+#ifdef CONFIG_PAX_RANDMMAP
6248+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6249+ mm->mmap_base += mm->delta_mmap;
6250+#endif
6251+
6252 mm->get_unmapped_area = arch_get_unmapped_area;
6253 mm->unmap_area = arch_unmap_area;
6254 } else {
6255@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6256 gap = (task_size / 6 * 5);
6257
6258 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6259+
6260+#ifdef CONFIG_PAX_RANDMMAP
6261+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6262+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6263+#endif
6264+
6265 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6266 mm->unmap_area = arch_unmap_area_topdown;
6267 }
6268diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6269index c0490c7..84959d1 100644
6270--- a/arch/sparc/kernel/traps_32.c
6271+++ b/arch/sparc/kernel/traps_32.c
6272@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6273 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6274 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6275
6276+extern void gr_handle_kernel_exploit(void);
6277+
6278 void die_if_kernel(char *str, struct pt_regs *regs)
6279 {
6280 static int die_counter;
6281@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6282 count++ < 30 &&
6283 (((unsigned long) rw) >= PAGE_OFFSET) &&
6284 !(((unsigned long) rw) & 0x7)) {
6285- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6286+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6287 (void *) rw->ins[7]);
6288 rw = (struct reg_window32 *)rw->ins[6];
6289 }
6290 }
6291 printk("Instruction DUMP:");
6292 instruction_dump ((unsigned long *) regs->pc);
6293- if(regs->psr & PSR_PS)
6294+ if(regs->psr & PSR_PS) {
6295+ gr_handle_kernel_exploit();
6296 do_exit(SIGKILL);
6297+ }
6298 do_exit(SIGSEGV);
6299 }
6300
6301diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6302index 10f7bb9..cdb6793 100644
6303--- a/arch/sparc/kernel/traps_64.c
6304+++ b/arch/sparc/kernel/traps_64.c
6305@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6306 i + 1,
6307 p->trapstack[i].tstate, p->trapstack[i].tpc,
6308 p->trapstack[i].tnpc, p->trapstack[i].tt);
6309- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6310+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6311 }
6312 }
6313
6314@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6315
6316 lvl -= 0x100;
6317 if (regs->tstate & TSTATE_PRIV) {
6318+
6319+#ifdef CONFIG_PAX_REFCOUNT
6320+ if (lvl == 6)
6321+ pax_report_refcount_overflow(regs);
6322+#endif
6323+
6324 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6325 die_if_kernel(buffer, regs);
6326 }
6327@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6328 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6329 {
6330 char buffer[32];
6331-
6332+
6333 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6334 0, lvl, SIGTRAP) == NOTIFY_STOP)
6335 return;
6336
6337+#ifdef CONFIG_PAX_REFCOUNT
6338+ if (lvl == 6)
6339+ pax_report_refcount_overflow(regs);
6340+#endif
6341+
6342 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6343
6344 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6345@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6346 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6347 printk("%s" "ERROR(%d): ",
6348 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6349- printk("TPC<%pS>\n", (void *) regs->tpc);
6350+ printk("TPC<%pA>\n", (void *) regs->tpc);
6351 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6352 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6353 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6354@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6355 smp_processor_id(),
6356 (type & 0x1) ? 'I' : 'D',
6357 regs->tpc);
6358- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6359+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6360 panic("Irrecoverable Cheetah+ parity error.");
6361 }
6362
6363@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6364 smp_processor_id(),
6365 (type & 0x1) ? 'I' : 'D',
6366 regs->tpc);
6367- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6368+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6369 }
6370
6371 struct sun4v_error_entry {
6372@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6373
6374 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6375 regs->tpc, tl);
6376- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6377+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6378 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6379- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6380+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6381 (void *) regs->u_regs[UREG_I7]);
6382 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6383 "pte[%lx] error[%lx]\n",
6384@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6385
6386 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6387 regs->tpc, tl);
6388- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6389+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6390 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6391- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6392+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6393 (void *) regs->u_regs[UREG_I7]);
6394 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6395 "pte[%lx] error[%lx]\n",
6396@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6397 fp = (unsigned long)sf->fp + STACK_BIAS;
6398 }
6399
6400- printk(" [%016lx] %pS\n", pc, (void *) pc);
6401+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6402 } while (++count < 16);
6403 }
6404
6405@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6406 return (struct reg_window *) (fp + STACK_BIAS);
6407 }
6408
6409+extern void gr_handle_kernel_exploit(void);
6410+
6411 void die_if_kernel(char *str, struct pt_regs *regs)
6412 {
6413 static int die_counter;
6414@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6415 while (rw &&
6416 count++ < 30&&
6417 is_kernel_stack(current, rw)) {
6418- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6419+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6420 (void *) rw->ins[7]);
6421
6422 rw = kernel_stack_up(rw);
6423@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6424 }
6425 user_instruction_dump ((unsigned int __user *) regs->tpc);
6426 }
6427- if (regs->tstate & TSTATE_PRIV)
6428+ if (regs->tstate & TSTATE_PRIV) {
6429+ gr_handle_kernel_exploit();
6430 do_exit(SIGKILL);
6431+ }
6432+
6433 do_exit(SIGSEGV);
6434 }
6435 EXPORT_SYMBOL(die_if_kernel);
6436diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
6437index be183fe..1c8d332 100644
6438--- a/arch/sparc/kernel/una_asm_64.S
6439+++ b/arch/sparc/kernel/una_asm_64.S
6440@@ -127,7 +127,7 @@ do_int_load:
6441 wr %o5, 0x0, %asi
6442 retl
6443 mov 0, %o0
6444- .size __do_int_load, .-__do_int_load
6445+ .size do_int_load, .-do_int_load
6446
6447 .section __ex_table,"a"
6448 .word 4b, __retl_efault
6449diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6450index 3792099..2af17d8 100644
6451--- a/arch/sparc/kernel/unaligned_64.c
6452+++ b/arch/sparc/kernel/unaligned_64.c
6453@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
6454 if (count < 5) {
6455 last_time = jiffies;
6456 count++;
6457- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6458+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6459 regs->tpc, (void *) regs->tpc);
6460 }
6461 }
6462diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6463index e75faf0..24f12f9 100644
6464--- a/arch/sparc/lib/Makefile
6465+++ b/arch/sparc/lib/Makefile
6466@@ -2,7 +2,7 @@
6467 #
6468
6469 asflags-y := -ansi -DST_DIV0=0x02
6470-ccflags-y := -Werror
6471+#ccflags-y := -Werror
6472
6473 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6474 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6475diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6476index 0268210..f0291ca 100644
6477--- a/arch/sparc/lib/atomic_64.S
6478+++ b/arch/sparc/lib/atomic_64.S
6479@@ -18,7 +18,12 @@
6480 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: lduw [%o1], %g1
6483- add %g1, %o0, %g7
6484+ addcc %g1, %o0, %g7
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ tvs %icc, 6
6488+#endif
6489+
6490 cas [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %icc, 2f
6493@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6494 2: BACKOFF_SPIN(%o2, %o3, 1b)
6495 .size atomic_add, .-atomic_add
6496
6497+ .globl atomic_add_unchecked
6498+ .type atomic_add_unchecked,#function
6499+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6500+ BACKOFF_SETUP(%o2)
6501+1: lduw [%o1], %g1
6502+ add %g1, %o0, %g7
6503+ cas [%o1], %g1, %g7
6504+ cmp %g1, %g7
6505+ bne,pn %icc, 2f
6506+ nop
6507+ retl
6508+ nop
6509+2: BACKOFF_SPIN(%o2, %o3, 1b)
6510+ .size atomic_add_unchecked, .-atomic_add_unchecked
6511+
6512 .globl atomic_sub
6513 .type atomic_sub,#function
6514 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6515 BACKOFF_SETUP(%o2)
6516 1: lduw [%o1], %g1
6517- sub %g1, %o0, %g7
6518+ subcc %g1, %o0, %g7
6519+
6520+#ifdef CONFIG_PAX_REFCOUNT
6521+ tvs %icc, 6
6522+#endif
6523+
6524 cas [%o1], %g1, %g7
6525 cmp %g1, %g7
6526 bne,pn %icc, 2f
6527@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6528 2: BACKOFF_SPIN(%o2, %o3, 1b)
6529 .size atomic_sub, .-atomic_sub
6530
6531+ .globl atomic_sub_unchecked
6532+ .type atomic_sub_unchecked,#function
6533+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6534+ BACKOFF_SETUP(%o2)
6535+1: lduw [%o1], %g1
6536+ sub %g1, %o0, %g7
6537+ cas [%o1], %g1, %g7
6538+ cmp %g1, %g7
6539+ bne,pn %icc, 2f
6540+ nop
6541+ retl
6542+ nop
6543+2: BACKOFF_SPIN(%o2, %o3, 1b)
6544+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6545+
6546 .globl atomic_add_ret
6547 .type atomic_add_ret,#function
6548 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6549 BACKOFF_SETUP(%o2)
6550 1: lduw [%o1], %g1
6551- add %g1, %o0, %g7
6552+ addcc %g1, %o0, %g7
6553+
6554+#ifdef CONFIG_PAX_REFCOUNT
6555+ tvs %icc, 6
6556+#endif
6557+
6558 cas [%o1], %g1, %g7
6559 cmp %g1, %g7
6560 bne,pn %icc, 2f
6561@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6562 2: BACKOFF_SPIN(%o2, %o3, 1b)
6563 .size atomic_add_ret, .-atomic_add_ret
6564
6565+ .globl atomic_add_ret_unchecked
6566+ .type atomic_add_ret_unchecked,#function
6567+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6568+ BACKOFF_SETUP(%o2)
6569+1: lduw [%o1], %g1
6570+ addcc %g1, %o0, %g7
6571+ cas [%o1], %g1, %g7
6572+ cmp %g1, %g7
6573+ bne,pn %icc, 2f
6574+ add %g7, %o0, %g7
6575+ sra %g7, 0, %o0
6576+ retl
6577+ nop
6578+2: BACKOFF_SPIN(%o2, %o3, 1b)
6579+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6580+
6581 .globl atomic_sub_ret
6582 .type atomic_sub_ret,#function
6583 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6584 BACKOFF_SETUP(%o2)
6585 1: lduw [%o1], %g1
6586- sub %g1, %o0, %g7
6587+ subcc %g1, %o0, %g7
6588+
6589+#ifdef CONFIG_PAX_REFCOUNT
6590+ tvs %icc, 6
6591+#endif
6592+
6593 cas [%o1], %g1, %g7
6594 cmp %g1, %g7
6595 bne,pn %icc, 2f
6596@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6597 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6598 BACKOFF_SETUP(%o2)
6599 1: ldx [%o1], %g1
6600- add %g1, %o0, %g7
6601+ addcc %g1, %o0, %g7
6602+
6603+#ifdef CONFIG_PAX_REFCOUNT
6604+ tvs %xcc, 6
6605+#endif
6606+
6607 casx [%o1], %g1, %g7
6608 cmp %g1, %g7
6609 bne,pn %xcc, 2f
6610@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6611 2: BACKOFF_SPIN(%o2, %o3, 1b)
6612 .size atomic64_add, .-atomic64_add
6613
6614+ .globl atomic64_add_unchecked
6615+ .type atomic64_add_unchecked,#function
6616+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6617+ BACKOFF_SETUP(%o2)
6618+1: ldx [%o1], %g1
6619+ addcc %g1, %o0, %g7
6620+ casx [%o1], %g1, %g7
6621+ cmp %g1, %g7
6622+ bne,pn %xcc, 2f
6623+ nop
6624+ retl
6625+ nop
6626+2: BACKOFF_SPIN(%o2, %o3, 1b)
6627+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6628+
6629 .globl atomic64_sub
6630 .type atomic64_sub,#function
6631 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6632 BACKOFF_SETUP(%o2)
6633 1: ldx [%o1], %g1
6634- sub %g1, %o0, %g7
6635+ subcc %g1, %o0, %g7
6636+
6637+#ifdef CONFIG_PAX_REFCOUNT
6638+ tvs %xcc, 6
6639+#endif
6640+
6641 casx [%o1], %g1, %g7
6642 cmp %g1, %g7
6643 bne,pn %xcc, 2f
6644@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6645 2: BACKOFF_SPIN(%o2, %o3, 1b)
6646 .size atomic64_sub, .-atomic64_sub
6647
6648+ .globl atomic64_sub_unchecked
6649+ .type atomic64_sub_unchecked,#function
6650+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6651+ BACKOFF_SETUP(%o2)
6652+1: ldx [%o1], %g1
6653+ subcc %g1, %o0, %g7
6654+ casx [%o1], %g1, %g7
6655+ cmp %g1, %g7
6656+ bne,pn %xcc, 2f
6657+ nop
6658+ retl
6659+ nop
6660+2: BACKOFF_SPIN(%o2, %o3, 1b)
6661+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6662+
6663 .globl atomic64_add_ret
6664 .type atomic64_add_ret,#function
6665 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6666 BACKOFF_SETUP(%o2)
6667 1: ldx [%o1], %g1
6668- add %g1, %o0, %g7
6669+ addcc %g1, %o0, %g7
6670+
6671+#ifdef CONFIG_PAX_REFCOUNT
6672+ tvs %xcc, 6
6673+#endif
6674+
6675 casx [%o1], %g1, %g7
6676 cmp %g1, %g7
6677 bne,pn %xcc, 2f
6678@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6679 2: BACKOFF_SPIN(%o2, %o3, 1b)
6680 .size atomic64_add_ret, .-atomic64_add_ret
6681
6682+ .globl atomic64_add_ret_unchecked
6683+ .type atomic64_add_ret_unchecked,#function
6684+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6685+ BACKOFF_SETUP(%o2)
6686+1: ldx [%o1], %g1
6687+ addcc %g1, %o0, %g7
6688+ casx [%o1], %g1, %g7
6689+ cmp %g1, %g7
6690+ bne,pn %xcc, 2f
6691+ add %g7, %o0, %g7
6692+ mov %g7, %o0
6693+ retl
6694+ nop
6695+2: BACKOFF_SPIN(%o2, %o3, 1b)
6696+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6697+
6698 .globl atomic64_sub_ret
6699 .type atomic64_sub_ret,#function
6700 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6701 BACKOFF_SETUP(%o2)
6702 1: ldx [%o1], %g1
6703- sub %g1, %o0, %g7
6704+ subcc %g1, %o0, %g7
6705+
6706+#ifdef CONFIG_PAX_REFCOUNT
6707+ tvs %xcc, 6
6708+#endif
6709+
6710 casx [%o1], %g1, %g7
6711 cmp %g1, %g7
6712 bne,pn %xcc, 2f
6713diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6714index 704b126..2e79d76 100644
6715--- a/arch/sparc/lib/ksyms.c
6716+++ b/arch/sparc/lib/ksyms.c
6717@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
6718
6719 /* Atomic counter implementation. */
6720 EXPORT_SYMBOL(atomic_add);
6721+EXPORT_SYMBOL(atomic_add_unchecked);
6722 EXPORT_SYMBOL(atomic_add_ret);
6723+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6724 EXPORT_SYMBOL(atomic_sub);
6725+EXPORT_SYMBOL(atomic_sub_unchecked);
6726 EXPORT_SYMBOL(atomic_sub_ret);
6727 EXPORT_SYMBOL(atomic64_add);
6728+EXPORT_SYMBOL(atomic64_add_unchecked);
6729 EXPORT_SYMBOL(atomic64_add_ret);
6730+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6731 EXPORT_SYMBOL(atomic64_sub);
6732+EXPORT_SYMBOL(atomic64_sub_unchecked);
6733 EXPORT_SYMBOL(atomic64_sub_ret);
6734
6735 /* Atomic bit operations. */
6736diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
6737index 91a7d29..ce75c29 100644
6738--- a/arch/sparc/lib/rwsem_64.S
6739+++ b/arch/sparc/lib/rwsem_64.S
6740@@ -11,7 +11,12 @@
6741 .globl __down_read
6742 __down_read:
6743 1: lduw [%o0], %g1
6744- add %g1, 1, %g7
6745+ addcc %g1, 1, %g7
6746+
6747+#ifdef CONFIG_PAX_REFCOUNT
6748+ tvs %icc, 6
6749+#endif
6750+
6751 cas [%o0], %g1, %g7
6752 cmp %g1, %g7
6753 bne,pn %icc, 1b
6754@@ -33,7 +38,12 @@ __down_read:
6755 .globl __down_read_trylock
6756 __down_read_trylock:
6757 1: lduw [%o0], %g1
6758- add %g1, 1, %g7
6759+ addcc %g1, 1, %g7
6760+
6761+#ifdef CONFIG_PAX_REFCOUNT
6762+ tvs %icc, 6
6763+#endif
6764+
6765 cmp %g7, 0
6766 bl,pn %icc, 2f
6767 mov 0, %o1
6768@@ -51,7 +61,12 @@ __down_write:
6769 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6770 1:
6771 lduw [%o0], %g3
6772- add %g3, %g1, %g7
6773+ addcc %g3, %g1, %g7
6774+
6775+#ifdef CONFIG_PAX_REFCOUNT
6776+ tvs %icc, 6
6777+#endif
6778+
6779 cas [%o0], %g3, %g7
6780 cmp %g3, %g7
6781 bne,pn %icc, 1b
6782@@ -77,7 +92,12 @@ __down_write_trylock:
6783 cmp %g3, 0
6784 bne,pn %icc, 2f
6785 mov 0, %o1
6786- add %g3, %g1, %g7
6787+ addcc %g3, %g1, %g7
6788+
6789+#ifdef CONFIG_PAX_REFCOUNT
6790+ tvs %icc, 6
6791+#endif
6792+
6793 cas [%o0], %g3, %g7
6794 cmp %g3, %g7
6795 bne,pn %icc, 1b
6796@@ -90,7 +110,12 @@ __down_write_trylock:
6797 __up_read:
6798 1:
6799 lduw [%o0], %g1
6800- sub %g1, 1, %g7
6801+ subcc %g1, 1, %g7
6802+
6803+#ifdef CONFIG_PAX_REFCOUNT
6804+ tvs %icc, 6
6805+#endif
6806+
6807 cas [%o0], %g1, %g7
6808 cmp %g1, %g7
6809 bne,pn %icc, 1b
6810@@ -118,7 +143,12 @@ __up_write:
6811 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6812 1:
6813 lduw [%o0], %g3
6814- sub %g3, %g1, %g7
6815+ subcc %g3, %g1, %g7
6816+
6817+#ifdef CONFIG_PAX_REFCOUNT
6818+ tvs %icc, 6
6819+#endif
6820+
6821 cas [%o0], %g3, %g7
6822 cmp %g3, %g7
6823 bne,pn %icc, 1b
6824@@ -143,7 +173,12 @@ __downgrade_write:
6825 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
6826 1:
6827 lduw [%o0], %g3
6828- sub %g3, %g1, %g7
6829+ subcc %g3, %g1, %g7
6830+
6831+#ifdef CONFIG_PAX_REFCOUNT
6832+ tvs %icc, 6
6833+#endif
6834+
6835 cas [%o0], %g3, %g7
6836 cmp %g3, %g7
6837 bne,pn %icc, 1b
6838diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6839index 79836a7..62f47a2 100644
6840--- a/arch/sparc/mm/Makefile
6841+++ b/arch/sparc/mm/Makefile
6842@@ -2,7 +2,7 @@
6843 #
6844
6845 asflags-y := -ansi
6846-ccflags-y := -Werror
6847+#ccflags-y := -Werror
6848
6849 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6850 obj-y += fault_$(BITS).o
6851diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6852index b99f81c..3453e93 100644
6853--- a/arch/sparc/mm/fault_32.c
6854+++ b/arch/sparc/mm/fault_32.c
6855@@ -21,6 +21,9 @@
6856 #include <linux/interrupt.h>
6857 #include <linux/module.h>
6858 #include <linux/kdebug.h>
6859+#include <linux/slab.h>
6860+#include <linux/pagemap.h>
6861+#include <linux/compiler.h>
6862
6863 #include <asm/system.h>
6864 #include <asm/page.h>
6865@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6866 return safe_compute_effective_address(regs, insn);
6867 }
6868
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+#ifdef CONFIG_PAX_DLRESOLVE
6871+static void pax_emuplt_close(struct vm_area_struct *vma)
6872+{
6873+ vma->vm_mm->call_dl_resolve = 0UL;
6874+}
6875+
6876+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6877+{
6878+ unsigned int *kaddr;
6879+
6880+ vmf->page = alloc_page(GFP_HIGHUSER);
6881+ if (!vmf->page)
6882+ return VM_FAULT_OOM;
6883+
6884+ kaddr = kmap(vmf->page);
6885+ memset(kaddr, 0, PAGE_SIZE);
6886+ kaddr[0] = 0x9DE3BFA8U; /* save */
6887+ flush_dcache_page(vmf->page);
6888+ kunmap(vmf->page);
6889+ return VM_FAULT_MAJOR;
6890+}
6891+
6892+static const struct vm_operations_struct pax_vm_ops = {
6893+ .close = pax_emuplt_close,
6894+ .fault = pax_emuplt_fault
6895+};
6896+
6897+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6898+{
6899+ int ret;
6900+
6901+ vma->vm_mm = current->mm;
6902+ vma->vm_start = addr;
6903+ vma->vm_end = addr + PAGE_SIZE;
6904+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6905+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6906+ vma->vm_ops = &pax_vm_ops;
6907+
6908+ ret = insert_vm_struct(current->mm, vma);
6909+ if (ret)
6910+ return ret;
6911+
6912+ ++current->mm->total_vm;
6913+ return 0;
6914+}
6915+#endif
6916+
6917+/*
6918+ * PaX: decide what to do with offenders (regs->pc = fault address)
6919+ *
6920+ * returns 1 when task should be killed
6921+ * 2 when patched PLT trampoline was detected
6922+ * 3 when unpatched PLT trampoline was detected
6923+ */
6924+static int pax_handle_fetch_fault(struct pt_regs *regs)
6925+{
6926+
6927+#ifdef CONFIG_PAX_EMUPLT
6928+ int err;
6929+
6930+ do { /* PaX: patched PLT emulation #1 */
6931+ unsigned int sethi1, sethi2, jmpl;
6932+
6933+ err = get_user(sethi1, (unsigned int *)regs->pc);
6934+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6935+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6936+
6937+ if (err)
6938+ break;
6939+
6940+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6941+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6942+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6943+ {
6944+ unsigned int addr;
6945+
6946+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6947+ addr = regs->u_regs[UREG_G1];
6948+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6949+ regs->pc = addr;
6950+ regs->npc = addr+4;
6951+ return 2;
6952+ }
6953+ } while (0);
6954+
6955+ { /* PaX: patched PLT emulation #2 */
6956+ unsigned int ba;
6957+
6958+ err = get_user(ba, (unsigned int *)regs->pc);
6959+
6960+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961+ unsigned int addr;
6962+
6963+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6964+ regs->pc = addr;
6965+ regs->npc = addr+4;
6966+ return 2;
6967+ }
6968+ }
6969+
6970+ do { /* PaX: patched PLT emulation #3 */
6971+ unsigned int sethi, jmpl, nop;
6972+
6973+ err = get_user(sethi, (unsigned int *)regs->pc);
6974+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6975+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6976+
6977+ if (err)
6978+ break;
6979+
6980+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6982+ nop == 0x01000000U)
6983+ {
6984+ unsigned int addr;
6985+
6986+ addr = (sethi & 0x003FFFFFU) << 10;
6987+ regs->u_regs[UREG_G1] = addr;
6988+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6989+ regs->pc = addr;
6990+ regs->npc = addr+4;
6991+ return 2;
6992+ }
6993+ } while (0);
6994+
6995+ do { /* PaX: unpatched PLT emulation step 1 */
6996+ unsigned int sethi, ba, nop;
6997+
6998+ err = get_user(sethi, (unsigned int *)regs->pc);
6999+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
7000+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7001+
7002+ if (err)
7003+ break;
7004+
7005+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7006+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7007+ nop == 0x01000000U)
7008+ {
7009+ unsigned int addr, save, call;
7010+
7011+ if ((ba & 0xFFC00000U) == 0x30800000U)
7012+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7013+ else
7014+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7015+
7016+ err = get_user(save, (unsigned int *)addr);
7017+ err |= get_user(call, (unsigned int *)(addr+4));
7018+ err |= get_user(nop, (unsigned int *)(addr+8));
7019+ if (err)
7020+ break;
7021+
7022+#ifdef CONFIG_PAX_DLRESOLVE
7023+ if (save == 0x9DE3BFA8U &&
7024+ (call & 0xC0000000U) == 0x40000000U &&
7025+ nop == 0x01000000U)
7026+ {
7027+ struct vm_area_struct *vma;
7028+ unsigned long call_dl_resolve;
7029+
7030+ down_read(&current->mm->mmap_sem);
7031+ call_dl_resolve = current->mm->call_dl_resolve;
7032+ up_read(&current->mm->mmap_sem);
7033+ if (likely(call_dl_resolve))
7034+ goto emulate;
7035+
7036+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7037+
7038+ down_write(&current->mm->mmap_sem);
7039+ if (current->mm->call_dl_resolve) {
7040+ call_dl_resolve = current->mm->call_dl_resolve;
7041+ up_write(&current->mm->mmap_sem);
7042+ if (vma)
7043+ kmem_cache_free(vm_area_cachep, vma);
7044+ goto emulate;
7045+ }
7046+
7047+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7048+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7049+ up_write(&current->mm->mmap_sem);
7050+ if (vma)
7051+ kmem_cache_free(vm_area_cachep, vma);
7052+ return 1;
7053+ }
7054+
7055+ if (pax_insert_vma(vma, call_dl_resolve)) {
7056+ up_write(&current->mm->mmap_sem);
7057+ kmem_cache_free(vm_area_cachep, vma);
7058+ return 1;
7059+ }
7060+
7061+ current->mm->call_dl_resolve = call_dl_resolve;
7062+ up_write(&current->mm->mmap_sem);
7063+
7064+emulate:
7065+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7066+ regs->pc = call_dl_resolve;
7067+ regs->npc = addr+4;
7068+ return 3;
7069+ }
7070+#endif
7071+
7072+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7073+ if ((save & 0xFFC00000U) == 0x05000000U &&
7074+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7075+ nop == 0x01000000U)
7076+ {
7077+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7078+ regs->u_regs[UREG_G2] = addr + 4;
7079+ addr = (save & 0x003FFFFFU) << 10;
7080+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7081+ regs->pc = addr;
7082+ regs->npc = addr+4;
7083+ return 3;
7084+ }
7085+ }
7086+ } while (0);
7087+
7088+ do { /* PaX: unpatched PLT emulation step 2 */
7089+ unsigned int save, call, nop;
7090+
7091+ err = get_user(save, (unsigned int *)(regs->pc-4));
7092+ err |= get_user(call, (unsigned int *)regs->pc);
7093+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7094+ if (err)
7095+ break;
7096+
7097+ if (save == 0x9DE3BFA8U &&
7098+ (call & 0xC0000000U) == 0x40000000U &&
7099+ nop == 0x01000000U)
7100+ {
7101+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7102+
7103+ regs->u_regs[UREG_RETPC] = regs->pc;
7104+ regs->pc = dl_resolve;
7105+ regs->npc = dl_resolve+4;
7106+ return 3;
7107+ }
7108+ } while (0);
7109+#endif
7110+
7111+ return 1;
7112+}
7113+
7114+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7115+{
7116+ unsigned long i;
7117+
7118+ printk(KERN_ERR "PAX: bytes at PC: ");
7119+ for (i = 0; i < 8; i++) {
7120+ unsigned int c;
7121+ if (get_user(c, (unsigned int *)pc+i))
7122+ printk(KERN_CONT "???????? ");
7123+ else
7124+ printk(KERN_CONT "%08x ", c);
7125+ }
7126+ printk("\n");
7127+}
7128+#endif
7129+
7130 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
7131 unsigned long address)
7132 {
7133@@ -231,6 +495,24 @@ good_area:
7134 if(!(vma->vm_flags & VM_WRITE))
7135 goto bad_area;
7136 } else {
7137+
7138+#ifdef CONFIG_PAX_PAGEEXEC
7139+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7140+ up_read(&mm->mmap_sem);
7141+ switch (pax_handle_fetch_fault(regs)) {
7142+
7143+#ifdef CONFIG_PAX_EMUPLT
7144+ case 2:
7145+ case 3:
7146+ return;
7147+#endif
7148+
7149+ }
7150+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7151+ do_group_exit(SIGKILL);
7152+ }
7153+#endif
7154+
7155 /* Allow reads even for write-only mappings */
7156 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
7157 goto bad_area;
7158diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7159index 43b0da9..a0b78f9 100644
7160--- a/arch/sparc/mm/fault_64.c
7161+++ b/arch/sparc/mm/fault_64.c
7162@@ -20,6 +20,9 @@
7163 #include <linux/kprobes.h>
7164 #include <linux/kdebug.h>
7165 #include <linux/percpu.h>
7166+#include <linux/slab.h>
7167+#include <linux/pagemap.h>
7168+#include <linux/compiler.h>
7169
7170 #include <asm/page.h>
7171 #include <asm/pgtable.h>
7172@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7173 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7174 regs->tpc);
7175 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7176- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7177+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7178 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7179 dump_stack();
7180 unhandled_fault(regs->tpc, current, regs);
7181@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
7182 show_regs(regs);
7183 }
7184
7185+#ifdef CONFIG_PAX_PAGEEXEC
7186+#ifdef CONFIG_PAX_DLRESOLVE
7187+static void pax_emuplt_close(struct vm_area_struct *vma)
7188+{
7189+ vma->vm_mm->call_dl_resolve = 0UL;
7190+}
7191+
7192+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7193+{
7194+ unsigned int *kaddr;
7195+
7196+ vmf->page = alloc_page(GFP_HIGHUSER);
7197+ if (!vmf->page)
7198+ return VM_FAULT_OOM;
7199+
7200+ kaddr = kmap(vmf->page);
7201+ memset(kaddr, 0, PAGE_SIZE);
7202+ kaddr[0] = 0x9DE3BFA8U; /* save */
7203+ flush_dcache_page(vmf->page);
7204+ kunmap(vmf->page);
7205+ return VM_FAULT_MAJOR;
7206+}
7207+
7208+static const struct vm_operations_struct pax_vm_ops = {
7209+ .close = pax_emuplt_close,
7210+ .fault = pax_emuplt_fault
7211+};
7212+
7213+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7214+{
7215+ int ret;
7216+
7217+ vma->vm_mm = current->mm;
7218+ vma->vm_start = addr;
7219+ vma->vm_end = addr + PAGE_SIZE;
7220+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7221+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7222+ vma->vm_ops = &pax_vm_ops;
7223+
7224+ ret = insert_vm_struct(current->mm, vma);
7225+ if (ret)
7226+ return ret;
7227+
7228+ ++current->mm->total_vm;
7229+ return 0;
7230+}
7231+#endif
7232+
7233+/*
7234+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7235+ *
7236+ * returns 1 when task should be killed
7237+ * 2 when patched PLT trampoline was detected
7238+ * 3 when unpatched PLT trampoline was detected
7239+ */
7240+static int pax_handle_fetch_fault(struct pt_regs *regs)
7241+{
7242+
7243+#ifdef CONFIG_PAX_EMUPLT
7244+ int err;
7245+
7246+ do { /* PaX: patched PLT emulation #1 */
7247+ unsigned int sethi1, sethi2, jmpl;
7248+
7249+ err = get_user(sethi1, (unsigned int *)regs->tpc);
7250+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7251+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7252+
7253+ if (err)
7254+ break;
7255+
7256+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7257+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7258+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7259+ {
7260+ unsigned long addr;
7261+
7262+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7263+ addr = regs->u_regs[UREG_G1];
7264+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7265+
7266+ if (test_thread_flag(TIF_32BIT))
7267+ addr &= 0xFFFFFFFFUL;
7268+
7269+ regs->tpc = addr;
7270+ regs->tnpc = addr+4;
7271+ return 2;
7272+ }
7273+ } while (0);
7274+
7275+ { /* PaX: patched PLT emulation #2 */
7276+ unsigned int ba;
7277+
7278+ err = get_user(ba, (unsigned int *)regs->tpc);
7279+
7280+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
7281+ unsigned long addr;
7282+
7283+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7284+
7285+ if (test_thread_flag(TIF_32BIT))
7286+ addr &= 0xFFFFFFFFUL;
7287+
7288+ regs->tpc = addr;
7289+ regs->tnpc = addr+4;
7290+ return 2;
7291+ }
7292+ }
7293+
7294+ do { /* PaX: patched PLT emulation #3 */
7295+ unsigned int sethi, jmpl, nop;
7296+
7297+ err = get_user(sethi, (unsigned int *)regs->tpc);
7298+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
7299+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7300+
7301+ if (err)
7302+ break;
7303+
7304+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7305+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
7306+ nop == 0x01000000U)
7307+ {
7308+ unsigned long addr;
7309+
7310+ addr = (sethi & 0x003FFFFFU) << 10;
7311+ regs->u_regs[UREG_G1] = addr;
7312+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7313+
7314+ if (test_thread_flag(TIF_32BIT))
7315+ addr &= 0xFFFFFFFFUL;
7316+
7317+ regs->tpc = addr;
7318+ regs->tnpc = addr+4;
7319+ return 2;
7320+ }
7321+ } while (0);
7322+
7323+ do { /* PaX: patched PLT emulation #4 */
7324+ unsigned int sethi, mov1, call, mov2;
7325+
7326+ err = get_user(sethi, (unsigned int *)regs->tpc);
7327+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7328+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7329+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7330+
7331+ if (err)
7332+ break;
7333+
7334+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7335+ mov1 == 0x8210000FU &&
7336+ (call & 0xC0000000U) == 0x40000000U &&
7337+ mov2 == 0x9E100001U)
7338+ {
7339+ unsigned long addr;
7340+
7341+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7342+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7343+
7344+ if (test_thread_flag(TIF_32BIT))
7345+ addr &= 0xFFFFFFFFUL;
7346+
7347+ regs->tpc = addr;
7348+ regs->tnpc = addr+4;
7349+ return 2;
7350+ }
7351+ } while (0);
7352+
7353+ do { /* PaX: patched PLT emulation #5 */
7354+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7355+
7356+ err = get_user(sethi, (unsigned int *)regs->tpc);
7357+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7358+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7359+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7360+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7361+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7362+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7363+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7364+
7365+ if (err)
7366+ break;
7367+
7368+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7369+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7370+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7371+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7372+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7373+ sllx == 0x83287020U &&
7374+ jmpl == 0x81C04005U &&
7375+ nop == 0x01000000U)
7376+ {
7377+ unsigned long addr;
7378+
7379+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7380+ regs->u_regs[UREG_G1] <<= 32;
7381+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7382+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7383+ regs->tpc = addr;
7384+ regs->tnpc = addr+4;
7385+ return 2;
7386+ }
7387+ } while (0);
7388+
7389+ do { /* PaX: patched PLT emulation #6 */
7390+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7391+
7392+ err = get_user(sethi, (unsigned int *)regs->tpc);
7393+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7394+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7395+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7396+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7397+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7398+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7399+
7400+ if (err)
7401+ break;
7402+
7403+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7404+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7405+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7406+ sllx == 0x83287020U &&
7407+ (or & 0xFFFFE000U) == 0x8A116000U &&
7408+ jmpl == 0x81C04005U &&
7409+ nop == 0x01000000U)
7410+ {
7411+ unsigned long addr;
7412+
7413+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7414+ regs->u_regs[UREG_G1] <<= 32;
7415+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7416+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7417+ regs->tpc = addr;
7418+ regs->tnpc = addr+4;
7419+ return 2;
7420+ }
7421+ } while (0);
7422+
7423+ do { /* PaX: unpatched PLT emulation step 1 */
7424+ unsigned int sethi, ba, nop;
7425+
7426+ err = get_user(sethi, (unsigned int *)regs->tpc);
7427+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7428+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7429+
7430+ if (err)
7431+ break;
7432+
7433+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7434+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7435+ nop == 0x01000000U)
7436+ {
7437+ unsigned long addr;
7438+ unsigned int save, call;
7439+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7440+
7441+ if ((ba & 0xFFC00000U) == 0x30800000U)
7442+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7443+ else
7444+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7445+
7446+ if (test_thread_flag(TIF_32BIT))
7447+ addr &= 0xFFFFFFFFUL;
7448+
7449+ err = get_user(save, (unsigned int *)addr);
7450+ err |= get_user(call, (unsigned int *)(addr+4));
7451+ err |= get_user(nop, (unsigned int *)(addr+8));
7452+ if (err)
7453+ break;
7454+
7455+#ifdef CONFIG_PAX_DLRESOLVE
7456+ if (save == 0x9DE3BFA8U &&
7457+ (call & 0xC0000000U) == 0x40000000U &&
7458+ nop == 0x01000000U)
7459+ {
7460+ struct vm_area_struct *vma;
7461+ unsigned long call_dl_resolve;
7462+
7463+ down_read(&current->mm->mmap_sem);
7464+ call_dl_resolve = current->mm->call_dl_resolve;
7465+ up_read(&current->mm->mmap_sem);
7466+ if (likely(call_dl_resolve))
7467+ goto emulate;
7468+
7469+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7470+
7471+ down_write(&current->mm->mmap_sem);
7472+ if (current->mm->call_dl_resolve) {
7473+ call_dl_resolve = current->mm->call_dl_resolve;
7474+ up_write(&current->mm->mmap_sem);
7475+ if (vma)
7476+ kmem_cache_free(vm_area_cachep, vma);
7477+ goto emulate;
7478+ }
7479+
7480+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7481+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7482+ up_write(&current->mm->mmap_sem);
7483+ if (vma)
7484+ kmem_cache_free(vm_area_cachep, vma);
7485+ return 1;
7486+ }
7487+
7488+ if (pax_insert_vma(vma, call_dl_resolve)) {
7489+ up_write(&current->mm->mmap_sem);
7490+ kmem_cache_free(vm_area_cachep, vma);
7491+ return 1;
7492+ }
7493+
7494+ current->mm->call_dl_resolve = call_dl_resolve;
7495+ up_write(&current->mm->mmap_sem);
7496+
7497+emulate:
7498+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7499+ regs->tpc = call_dl_resolve;
7500+ regs->tnpc = addr+4;
7501+ return 3;
7502+ }
7503+#endif
7504+
7505+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7506+ if ((save & 0xFFC00000U) == 0x05000000U &&
7507+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7508+ nop == 0x01000000U)
7509+ {
7510+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7511+ regs->u_regs[UREG_G2] = addr + 4;
7512+ addr = (save & 0x003FFFFFU) << 10;
7513+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7514+
7515+ if (test_thread_flag(TIF_32BIT))
7516+ addr &= 0xFFFFFFFFUL;
7517+
7518+ regs->tpc = addr;
7519+ regs->tnpc = addr+4;
7520+ return 3;
7521+ }
7522+
7523+ /* PaX: 64-bit PLT stub */
7524+ err = get_user(sethi1, (unsigned int *)addr);
7525+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7526+ err |= get_user(or1, (unsigned int *)(addr+8));
7527+ err |= get_user(or2, (unsigned int *)(addr+12));
7528+ err |= get_user(sllx, (unsigned int *)(addr+16));
7529+ err |= get_user(add, (unsigned int *)(addr+20));
7530+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7531+ err |= get_user(nop, (unsigned int *)(addr+28));
7532+ if (err)
7533+ break;
7534+
7535+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7536+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7537+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7538+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7539+ sllx == 0x89293020U &&
7540+ add == 0x8A010005U &&
7541+ jmpl == 0x89C14000U &&
7542+ nop == 0x01000000U)
7543+ {
7544+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7545+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7546+ regs->u_regs[UREG_G4] <<= 32;
7547+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7548+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7549+ regs->u_regs[UREG_G4] = addr + 24;
7550+ addr = regs->u_regs[UREG_G5];
7551+ regs->tpc = addr;
7552+ regs->tnpc = addr+4;
7553+ return 3;
7554+ }
7555+ }
7556+ } while (0);
7557+
7558+#ifdef CONFIG_PAX_DLRESOLVE
7559+ do { /* PaX: unpatched PLT emulation step 2 */
7560+ unsigned int save, call, nop;
7561+
7562+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7563+ err |= get_user(call, (unsigned int *)regs->tpc);
7564+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7565+ if (err)
7566+ break;
7567+
7568+ if (save == 0x9DE3BFA8U &&
7569+ (call & 0xC0000000U) == 0x40000000U &&
7570+ nop == 0x01000000U)
7571+ {
7572+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7573+
7574+ if (test_thread_flag(TIF_32BIT))
7575+ dl_resolve &= 0xFFFFFFFFUL;
7576+
7577+ regs->u_regs[UREG_RETPC] = regs->tpc;
7578+ regs->tpc = dl_resolve;
7579+ regs->tnpc = dl_resolve+4;
7580+ return 3;
7581+ }
7582+ } while (0);
7583+#endif
7584+
7585+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7586+ unsigned int sethi, ba, nop;
7587+
7588+ err = get_user(sethi, (unsigned int *)regs->tpc);
7589+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7590+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7591+
7592+ if (err)
7593+ break;
7594+
7595+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7596+ (ba & 0xFFF00000U) == 0x30600000U &&
7597+ nop == 0x01000000U)
7598+ {
7599+ unsigned long addr;
7600+
7601+ addr = (sethi & 0x003FFFFFU) << 10;
7602+ regs->u_regs[UREG_G1] = addr;
7603+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7604+
7605+ if (test_thread_flag(TIF_32BIT))
7606+ addr &= 0xFFFFFFFFUL;
7607+
7608+ regs->tpc = addr;
7609+ regs->tnpc = addr+4;
7610+ return 2;
7611+ }
7612+ } while (0);
7613+
7614+#endif
7615+
7616+ return 1;
7617+}
7618+
7619+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7620+{
7621+ unsigned long i;
7622+
7623+ printk(KERN_ERR "PAX: bytes at PC: ");
7624+ for (i = 0; i < 8; i++) {
7625+ unsigned int c;
7626+ if (get_user(c, (unsigned int *)pc+i))
7627+ printk(KERN_CONT "???????? ");
7628+ else
7629+ printk(KERN_CONT "%08x ", c);
7630+ }
7631+ printk("\n");
7632+}
7633+#endif
7634+
7635 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7636 {
7637 struct mm_struct *mm = current->mm;
7638@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7639 if (!vma)
7640 goto bad_area;
7641
7642+#ifdef CONFIG_PAX_PAGEEXEC
7643+ /* PaX: detect ITLB misses on non-exec pages */
7644+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7645+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7646+ {
7647+ if (address != regs->tpc)
7648+ goto good_area;
7649+
7650+ up_read(&mm->mmap_sem);
7651+ switch (pax_handle_fetch_fault(regs)) {
7652+
7653+#ifdef CONFIG_PAX_EMUPLT
7654+ case 2:
7655+ case 3:
7656+ return;
7657+#endif
7658+
7659+ }
7660+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7661+ do_group_exit(SIGKILL);
7662+ }
7663+#endif
7664+
7665 /* Pure DTLB misses do not tell us whether the fault causing
7666 * load/store/atomic was a write or not, it only says that there
7667 * was no match. So in such a case we (carefully) read the
7668diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7669index f27d103..1b06377 100644
7670--- a/arch/sparc/mm/hugetlbpage.c
7671+++ b/arch/sparc/mm/hugetlbpage.c
7672@@ -69,7 +69,7 @@ full_search:
7673 }
7674 return -ENOMEM;
7675 }
7676- if (likely(!vma || addr + len <= vma->vm_start)) {
7677+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7678 /*
7679 * Remember the place where we stopped the search:
7680 */
7681@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7682 /* make sure it can fit in the remaining address space */
7683 if (likely(addr > len)) {
7684 vma = find_vma(mm, addr-len);
7685- if (!vma || addr <= vma->vm_start) {
7686+ if (check_heap_stack_gap(vma, addr - len, len)) {
7687 /* remember the address as a hint for next time */
7688 return (mm->free_area_cache = addr-len);
7689 }
7690@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7691 if (unlikely(mm->mmap_base < len))
7692 goto bottomup;
7693
7694- addr = (mm->mmap_base-len) & HPAGE_MASK;
7695+ addr = mm->mmap_base - len;
7696
7697 do {
7698+ addr &= HPAGE_MASK;
7699 /*
7700 * Lookup failure means no vma is above this address,
7701 * else if new region fits below vma->vm_start,
7702 * return with success:
7703 */
7704 vma = find_vma(mm, addr);
7705- if (likely(!vma || addr+len <= vma->vm_start)) {
7706+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7707 /* remember the address as a hint for next time */
7708 return (mm->free_area_cache = addr);
7709 }
7710@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7711 mm->cached_hole_size = vma->vm_start - addr;
7712
7713 /* try just below the current vma->vm_start */
7714- addr = (vma->vm_start-len) & HPAGE_MASK;
7715- } while (likely(len < vma->vm_start));
7716+ addr = skip_heap_stack_gap(vma, len);
7717+ } while (!IS_ERR_VALUE(addr));
7718
7719 bottomup:
7720 /*
7721@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7722 if (addr) {
7723 addr = ALIGN(addr, HPAGE_SIZE);
7724 vma = find_vma(mm, addr);
7725- if (task_size - len >= addr &&
7726- (!vma || addr + len <= vma->vm_start))
7727+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7728 return addr;
7729 }
7730 if (mm->get_unmapped_area == arch_get_unmapped_area)
7731diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7732index dc7c3b1..34c0070 100644
7733--- a/arch/sparc/mm/init_32.c
7734+++ b/arch/sparc/mm/init_32.c
7735@@ -317,6 +317,9 @@ extern void device_scan(void);
7736 pgprot_t PAGE_SHARED __read_mostly;
7737 EXPORT_SYMBOL(PAGE_SHARED);
7738
7739+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7740+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7741+
7742 void __init paging_init(void)
7743 {
7744 switch(sparc_cpu_model) {
7745@@ -345,17 +348,17 @@ void __init paging_init(void)
7746
7747 /* Initialize the protection map with non-constant, MMU dependent values. */
7748 protection_map[0] = PAGE_NONE;
7749- protection_map[1] = PAGE_READONLY;
7750- protection_map[2] = PAGE_COPY;
7751- protection_map[3] = PAGE_COPY;
7752+ protection_map[1] = PAGE_READONLY_NOEXEC;
7753+ protection_map[2] = PAGE_COPY_NOEXEC;
7754+ protection_map[3] = PAGE_COPY_NOEXEC;
7755 protection_map[4] = PAGE_READONLY;
7756 protection_map[5] = PAGE_READONLY;
7757 protection_map[6] = PAGE_COPY;
7758 protection_map[7] = PAGE_COPY;
7759 protection_map[8] = PAGE_NONE;
7760- protection_map[9] = PAGE_READONLY;
7761- protection_map[10] = PAGE_SHARED;
7762- protection_map[11] = PAGE_SHARED;
7763+ protection_map[9] = PAGE_READONLY_NOEXEC;
7764+ protection_map[10] = PAGE_SHARED_NOEXEC;
7765+ protection_map[11] = PAGE_SHARED_NOEXEC;
7766 protection_map[12] = PAGE_READONLY;
7767 protection_map[13] = PAGE_READONLY;
7768 protection_map[14] = PAGE_SHARED;
7769diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7770index 509b1ff..bfd7118 100644
7771--- a/arch/sparc/mm/srmmu.c
7772+++ b/arch/sparc/mm/srmmu.c
7773@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7774 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7775 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7776 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7777+
7778+#ifdef CONFIG_PAX_PAGEEXEC
7779+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7780+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7781+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7782+#endif
7783+
7784 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7785 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7786
7787diff --git a/arch/um/Makefile b/arch/um/Makefile
7788index fc633db..5e1a1c2 100644
7789--- a/arch/um/Makefile
7790+++ b/arch/um/Makefile
7791@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7792 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7793 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
7794
7795+ifdef CONSTIFY_PLUGIN
7796+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7797+endif
7798+
7799 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
7800
7801 #This will adjust *FLAGS accordingly to the platform.
7802diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7803index 19e1bdd..3665b77 100644
7804--- a/arch/um/include/asm/cache.h
7805+++ b/arch/um/include/asm/cache.h
7806@@ -1,6 +1,7 @@
7807 #ifndef __UM_CACHE_H
7808 #define __UM_CACHE_H
7809
7810+#include <linux/const.h>
7811
7812 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7813 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7814@@ -12,6 +13,6 @@
7815 # define L1_CACHE_SHIFT 5
7816 #endif
7817
7818-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7819+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7820
7821 #endif
7822diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7823index 6c03acd..a5e0215 100644
7824--- a/arch/um/include/asm/kmap_types.h
7825+++ b/arch/um/include/asm/kmap_types.h
7826@@ -23,6 +23,7 @@ enum km_type {
7827 KM_IRQ1,
7828 KM_SOFTIRQ0,
7829 KM_SOFTIRQ1,
7830+ KM_CLEARPAGE,
7831 KM_TYPE_NR
7832 };
7833
7834diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7835index 4cc9b6c..02e5029 100644
7836--- a/arch/um/include/asm/page.h
7837+++ b/arch/um/include/asm/page.h
7838@@ -14,6 +14,9 @@
7839 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7840 #define PAGE_MASK (~(PAGE_SIZE-1))
7841
7842+#define ktla_ktva(addr) (addr)
7843+#define ktva_ktla(addr) (addr)
7844+
7845 #ifndef __ASSEMBLY__
7846
7847 struct page;
7848diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7849index 4a28a15..654dc2a 100644
7850--- a/arch/um/kernel/process.c
7851+++ b/arch/um/kernel/process.c
7852@@ -393,22 +393,6 @@ int singlestepping(void * t)
7853 return 2;
7854 }
7855
7856-/*
7857- * Only x86 and x86_64 have an arch_align_stack().
7858- * All other arches have "#define arch_align_stack(x) (x)"
7859- * in their asm/system.h
7860- * As this is included in UML from asm-um/system-generic.h,
7861- * we can use it to behave as the subarch does.
7862- */
7863-#ifndef arch_align_stack
7864-unsigned long arch_align_stack(unsigned long sp)
7865-{
7866- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7867- sp -= get_random_int() % 8192;
7868- return sp & ~0xf;
7869-}
7870-#endif
7871-
7872 unsigned long get_wchan(struct task_struct *p)
7873 {
7874 unsigned long stack_page, sp, ip;
7875diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7876index d1b93c4..ae1b7fd 100644
7877--- a/arch/um/sys-i386/shared/sysdep/system.h
7878+++ b/arch/um/sys-i386/shared/sysdep/system.h
7879@@ -17,7 +17,7 @@
7880 # define AT_VECTOR_SIZE_ARCH 1
7881 #endif
7882
7883-extern unsigned long arch_align_stack(unsigned long sp);
7884+#define arch_align_stack(x) ((x) & ~0xfUL)
7885
7886 void default_idle(void);
7887
7888diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7889index 857ca0b..9a2669d 100644
7890--- a/arch/um/sys-i386/syscalls.c
7891+++ b/arch/um/sys-i386/syscalls.c
7892@@ -11,6 +11,21 @@
7893 #include "asm/uaccess.h"
7894 #include "asm/unistd.h"
7895
7896+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7897+{
7898+ unsigned long pax_task_size = TASK_SIZE;
7899+
7900+#ifdef CONFIG_PAX_SEGMEXEC
7901+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7902+ pax_task_size = SEGMEXEC_TASK_SIZE;
7903+#endif
7904+
7905+ if (len > pax_task_size || addr > pax_task_size - len)
7906+ return -EINVAL;
7907+
7908+ return 0;
7909+}
7910+
7911 /*
7912 * Perform the select(nd, in, out, ex, tv) and mmap() system
7913 * calls. Linux/i386 didn't use to be able to handle more than
7914diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7915index d1b93c4..ae1b7fd 100644
7916--- a/arch/um/sys-x86_64/shared/sysdep/system.h
7917+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7918@@ -17,7 +17,7 @@
7919 # define AT_VECTOR_SIZE_ARCH 1
7920 #endif
7921
7922-extern unsigned long arch_align_stack(unsigned long sp);
7923+#define arch_align_stack(x) ((x) & ~0xfUL)
7924
7925 void default_idle(void);
7926
7927diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7928index 73ae02a..f932de5 100644
7929--- a/arch/x86/Kconfig
7930+++ b/arch/x86/Kconfig
7931@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7932
7933 config X86_32_LAZY_GS
7934 def_bool y
7935- depends on X86_32 && !CC_STACKPROTECTOR
7936+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7937
7938 config KTIME_SCALAR
7939 def_bool X86_32
7940@@ -1008,7 +1008,7 @@ choice
7941
7942 config NOHIGHMEM
7943 bool "off"
7944- depends on !X86_NUMAQ
7945+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7946 ---help---
7947 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7948 However, the address space of 32-bit x86 processors is only 4
7949@@ -1045,7 +1045,7 @@ config NOHIGHMEM
7950
7951 config HIGHMEM4G
7952 bool "4GB"
7953- depends on !X86_NUMAQ
7954+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7955 ---help---
7956 Select this if you have a 32-bit processor and between 1 and 4
7957 gigabytes of physical RAM.
7958@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7959 hex
7960 default 0xB0000000 if VMSPLIT_3G_OPT
7961 default 0x80000000 if VMSPLIT_2G
7962- default 0x78000000 if VMSPLIT_2G_OPT
7963+ default 0x70000000 if VMSPLIT_2G_OPT
7964 default 0x40000000 if VMSPLIT_1G
7965 default 0xC0000000
7966 depends on X86_32
7967@@ -1460,6 +1460,7 @@ config SECCOMP
7968
7969 config CC_STACKPROTECTOR
7970 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7971+ depends on X86_64 || !PAX_MEMORY_UDEREF
7972 ---help---
7973 This option turns on the -fstack-protector GCC feature. This
7974 feature puts, at the beginning of functions, a canary value on
7975@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7976 config PHYSICAL_START
7977 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7978 default "0x1000000"
7979+ range 0x400000 0x40000000
7980 ---help---
7981 This gives the physical address where the kernel is loaded.
7982
7983@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7984 hex
7985 prompt "Alignment value to which kernel should be aligned" if X86_32
7986 default "0x1000000"
7987+ range 0x400000 0x1000000 if PAX_KERNEXEC
7988 range 0x2000 0x1000000
7989 ---help---
7990 This value puts the alignment restrictions on physical address
7991@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7992 Say N if you want to disable CPU hotplug.
7993
7994 config COMPAT_VDSO
7995- def_bool y
7996+ def_bool n
7997 prompt "Compat VDSO support"
7998 depends on X86_32 || IA32_EMULATION
7999+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8000 ---help---
8001 Map the 32-bit VDSO to the predictable old-style address too.
8002 ---help---
8003diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8004index 0e566103..1a6b57e 100644
8005--- a/arch/x86/Kconfig.cpu
8006+++ b/arch/x86/Kconfig.cpu
8007@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
8008
8009 config X86_F00F_BUG
8010 def_bool y
8011- depends on M586MMX || M586TSC || M586 || M486 || M386
8012+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8013
8014 config X86_WP_WORKS_OK
8015 def_bool y
8016@@ -360,7 +360,7 @@ config X86_POPAD_OK
8017
8018 config X86_ALIGNMENT_16
8019 def_bool y
8020- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8021+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8022
8023 config X86_INTEL_USERCOPY
8024 def_bool y
8025@@ -406,7 +406,7 @@ config X86_CMPXCHG64
8026 # generates cmov.
8027 config X86_CMOV
8028 def_bool y
8029- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8030+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8031
8032 config X86_MINIMUM_CPU_FAMILY
8033 int
8034diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8035index d105f29..c928727 100644
8036--- a/arch/x86/Kconfig.debug
8037+++ b/arch/x86/Kconfig.debug
8038@@ -99,7 +99,7 @@ config X86_PTDUMP
8039 config DEBUG_RODATA
8040 bool "Write protect kernel read-only data structures"
8041 default y
8042- depends on DEBUG_KERNEL
8043+ depends on DEBUG_KERNEL && BROKEN
8044 ---help---
8045 Mark the kernel read-only data as write-protected in the pagetables,
8046 in order to catch accidental (and incorrect) writes to such const
8047diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8048index d2d24c9..0f21f8d 100644
8049--- a/arch/x86/Makefile
8050+++ b/arch/x86/Makefile
8051@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
8052 else
8053 BITS := 64
8054 UTS_MACHINE := x86_64
8055+ biarch := $(call cc-option,-m64)
8056 CHECKFLAGS += -D__x86_64__ -m64
8057
8058 KBUILD_AFLAGS += -m64
8059@@ -189,3 +190,12 @@ define archhelp
8060 echo ' FDARGS="..." arguments for the booted kernel'
8061 echo ' FDINITRD=file initrd for the booted kernel'
8062 endef
8063+
8064+define OLD_LD
8065+
8066+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8067+*** Please upgrade your binutils to 2.18 or newer
8068+endef
8069+
8070+archprepare:
8071+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8072diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8073index ec749c2..bbb5319 100644
8074--- a/arch/x86/boot/Makefile
8075+++ b/arch/x86/boot/Makefile
8076@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8077 $(call cc-option, -fno-stack-protector) \
8078 $(call cc-option, -mpreferred-stack-boundary=2)
8079 KBUILD_CFLAGS += $(call cc-option, -m32)
8080+ifdef CONSTIFY_PLUGIN
8081+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8082+endif
8083 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8084 GCOV_PROFILE := n
8085
8086diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8087index 878e4b9..20537ab 100644
8088--- a/arch/x86/boot/bitops.h
8089+++ b/arch/x86/boot/bitops.h
8090@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8091 u8 v;
8092 const u32 *p = (const u32 *)addr;
8093
8094- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8095+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8096 return v;
8097 }
8098
8099@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8100
8101 static inline void set_bit(int nr, void *addr)
8102 {
8103- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8104+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8105 }
8106
8107 #endif /* BOOT_BITOPS_H */
8108diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8109index 98239d2..f40214c 100644
8110--- a/arch/x86/boot/boot.h
8111+++ b/arch/x86/boot/boot.h
8112@@ -82,7 +82,7 @@ static inline void io_delay(void)
8113 static inline u16 ds(void)
8114 {
8115 u16 seg;
8116- asm("movw %%ds,%0" : "=rm" (seg));
8117+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8118 return seg;
8119 }
8120
8121@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8122 static inline int memcmp(const void *s1, const void *s2, size_t len)
8123 {
8124 u8 diff;
8125- asm("repe; cmpsb; setnz %0"
8126+ asm volatile("repe; cmpsb; setnz %0"
8127 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8128 return diff;
8129 }
8130diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8131index f8ed065..5bf5ff3 100644
8132--- a/arch/x86/boot/compressed/Makefile
8133+++ b/arch/x86/boot/compressed/Makefile
8134@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8135 KBUILD_CFLAGS += $(cflags-y)
8136 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8137 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8138+ifdef CONSTIFY_PLUGIN
8139+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8140+endif
8141
8142 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8143 GCOV_PROFILE := n
8144diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8145index f543b70..b60fba8 100644
8146--- a/arch/x86/boot/compressed/head_32.S
8147+++ b/arch/x86/boot/compressed/head_32.S
8148@@ -76,7 +76,7 @@ ENTRY(startup_32)
8149 notl %eax
8150 andl %eax, %ebx
8151 #else
8152- movl $LOAD_PHYSICAL_ADDR, %ebx
8153+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8154 #endif
8155
8156 /* Target address to relocate to for decompression */
8157@@ -149,7 +149,7 @@ relocated:
8158 * and where it was actually loaded.
8159 */
8160 movl %ebp, %ebx
8161- subl $LOAD_PHYSICAL_ADDR, %ebx
8162+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8163 jz 2f /* Nothing to be done if loaded at compiled addr. */
8164 /*
8165 * Process relocations.
8166@@ -157,8 +157,7 @@ relocated:
8167
8168 1: subl $4, %edi
8169 movl (%edi), %ecx
8170- testl %ecx, %ecx
8171- jz 2f
8172+ jecxz 2f
8173 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8174 jmp 1b
8175 2:
8176diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8177index 077e1b6..2c6b13b 100644
8178--- a/arch/x86/boot/compressed/head_64.S
8179+++ b/arch/x86/boot/compressed/head_64.S
8180@@ -91,7 +91,7 @@ ENTRY(startup_32)
8181 notl %eax
8182 andl %eax, %ebx
8183 #else
8184- movl $LOAD_PHYSICAL_ADDR, %ebx
8185+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8186 #endif
8187
8188 /* Target address to relocate to for decompression */
8189@@ -183,7 +183,7 @@ no_longmode:
8190 hlt
8191 jmp 1b
8192
8193-#include "../../kernel/verify_cpu_64.S"
8194+#include "../../kernel/verify_cpu.S"
8195
8196 /*
8197 * Be careful here startup_64 needs to be at a predictable
8198@@ -234,7 +234,7 @@ ENTRY(startup_64)
8199 notq %rax
8200 andq %rax, %rbp
8201 #else
8202- movq $LOAD_PHYSICAL_ADDR, %rbp
8203+ movq $____LOAD_PHYSICAL_ADDR, %rbp
8204 #endif
8205
8206 /* Target address to relocate to for decompression */
8207diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8208index 842b2a3..f00178b 100644
8209--- a/arch/x86/boot/compressed/misc.c
8210+++ b/arch/x86/boot/compressed/misc.c
8211@@ -288,7 +288,7 @@ static void parse_elf(void *output)
8212 case PT_LOAD:
8213 #ifdef CONFIG_RELOCATABLE
8214 dest = output;
8215- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8216+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8217 #else
8218 dest = (void *)(phdr->p_paddr);
8219 #endif
8220@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8221 error("Destination address too large");
8222 #endif
8223 #ifndef CONFIG_RELOCATABLE
8224- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8225+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8226 error("Wrong destination address");
8227 #endif
8228
8229diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
8230index bcbd36c..b1754af 100644
8231--- a/arch/x86/boot/compressed/mkpiggy.c
8232+++ b/arch/x86/boot/compressed/mkpiggy.c
8233@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
8234
8235 offs = (olen > ilen) ? olen - ilen : 0;
8236 offs += olen >> 12; /* Add 8 bytes for each 32K block */
8237- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
8238+ offs += 64*1024; /* Add 64K bytes slack */
8239 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
8240
8241 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
8242diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
8243index bbeb0c3..f5167ab 100644
8244--- a/arch/x86/boot/compressed/relocs.c
8245+++ b/arch/x86/boot/compressed/relocs.c
8246@@ -10,8 +10,11 @@
8247 #define USE_BSD
8248 #include <endian.h>
8249
8250+#include "../../../../include/linux/autoconf.h"
8251+
8252 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
8253 static Elf32_Ehdr ehdr;
8254+static Elf32_Phdr *phdr;
8255 static unsigned long reloc_count, reloc_idx;
8256 static unsigned long *relocs;
8257
8258@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
8259
8260 static int is_safe_abs_reloc(const char* sym_name)
8261 {
8262- int i;
8263+ unsigned int i;
8264
8265 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
8266 if (!strcmp(sym_name, safe_abs_relocs[i]))
8267@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
8268 }
8269 }
8270
8271+static void read_phdrs(FILE *fp)
8272+{
8273+ unsigned int i;
8274+
8275+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
8276+ if (!phdr) {
8277+ die("Unable to allocate %d program headers\n",
8278+ ehdr.e_phnum);
8279+ }
8280+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
8281+ die("Seek to %d failed: %s\n",
8282+ ehdr.e_phoff, strerror(errno));
8283+ }
8284+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
8285+ die("Cannot read ELF program headers: %s\n",
8286+ strerror(errno));
8287+ }
8288+ for(i = 0; i < ehdr.e_phnum; i++) {
8289+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
8290+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
8291+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
8292+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
8293+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
8294+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
8295+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
8296+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
8297+ }
8298+
8299+}
8300+
8301 static void read_shdrs(FILE *fp)
8302 {
8303- int i;
8304+ unsigned int i;
8305 Elf32_Shdr shdr;
8306
8307 secs = calloc(ehdr.e_shnum, sizeof(struct section));
8308@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
8309
8310 static void read_strtabs(FILE *fp)
8311 {
8312- int i;
8313+ unsigned int i;
8314 for (i = 0; i < ehdr.e_shnum; i++) {
8315 struct section *sec = &secs[i];
8316 if (sec->shdr.sh_type != SHT_STRTAB) {
8317@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
8318
8319 static void read_symtabs(FILE *fp)
8320 {
8321- int i,j;
8322+ unsigned int i,j;
8323 for (i = 0; i < ehdr.e_shnum; i++) {
8324 struct section *sec = &secs[i];
8325 if (sec->shdr.sh_type != SHT_SYMTAB) {
8326@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
8327
8328 static void read_relocs(FILE *fp)
8329 {
8330- int i,j;
8331+ unsigned int i,j;
8332+ uint32_t base;
8333+
8334 for (i = 0; i < ehdr.e_shnum; i++) {
8335 struct section *sec = &secs[i];
8336 if (sec->shdr.sh_type != SHT_REL) {
8337@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
8338 die("Cannot read symbol table: %s\n",
8339 strerror(errno));
8340 }
8341+ base = 0;
8342+ for (j = 0; j < ehdr.e_phnum; j++) {
8343+ if (phdr[j].p_type != PT_LOAD )
8344+ continue;
8345+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
8346+ continue;
8347+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
8348+ break;
8349+ }
8350 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
8351 Elf32_Rel *rel = &sec->reltab[j];
8352- rel->r_offset = elf32_to_cpu(rel->r_offset);
8353+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
8354 rel->r_info = elf32_to_cpu(rel->r_info);
8355 }
8356 }
8357@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
8358
8359 static void print_absolute_symbols(void)
8360 {
8361- int i;
8362+ unsigned int i;
8363 printf("Absolute symbols\n");
8364 printf(" Num: Value Size Type Bind Visibility Name\n");
8365 for (i = 0; i < ehdr.e_shnum; i++) {
8366 struct section *sec = &secs[i];
8367 char *sym_strtab;
8368 Elf32_Sym *sh_symtab;
8369- int j;
8370+ unsigned int j;
8371
8372 if (sec->shdr.sh_type != SHT_SYMTAB) {
8373 continue;
8374@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
8375
8376 static void print_absolute_relocs(void)
8377 {
8378- int i, printed = 0;
8379+ unsigned int i, printed = 0;
8380
8381 for (i = 0; i < ehdr.e_shnum; i++) {
8382 struct section *sec = &secs[i];
8383 struct section *sec_applies, *sec_symtab;
8384 char *sym_strtab;
8385 Elf32_Sym *sh_symtab;
8386- int j;
8387+ unsigned int j;
8388 if (sec->shdr.sh_type != SHT_REL) {
8389 continue;
8390 }
8391@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
8392
8393 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8394 {
8395- int i;
8396+ unsigned int i;
8397 /* Walk through the relocations */
8398 for (i = 0; i < ehdr.e_shnum; i++) {
8399 char *sym_strtab;
8400 Elf32_Sym *sh_symtab;
8401 struct section *sec_applies, *sec_symtab;
8402- int j;
8403+ unsigned int j;
8404 struct section *sec = &secs[i];
8405
8406 if (sec->shdr.sh_type != SHT_REL) {
8407@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8408 if (sym->st_shndx == SHN_ABS) {
8409 continue;
8410 }
8411+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8412+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8413+ continue;
8414+
8415+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8416+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8417+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8418+ continue;
8419+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8420+ continue;
8421+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8422+ continue;
8423+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8424+ continue;
8425+#endif
8426 if (r_type == R_386_NONE || r_type == R_386_PC32) {
8427 /*
8428 * NONE can be ignored and and PC relative
8429@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
8430
8431 static void emit_relocs(int as_text)
8432 {
8433- int i;
8434+ unsigned int i;
8435 /* Count how many relocations I have and allocate space for them. */
8436 reloc_count = 0;
8437 walk_relocs(count_reloc);
8438@@ -634,6 +693,7 @@ int main(int argc, char **argv)
8439 fname, strerror(errno));
8440 }
8441 read_ehdr(fp);
8442+ read_phdrs(fp);
8443 read_shdrs(fp);
8444 read_strtabs(fp);
8445 read_symtabs(fp);
8446diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8447index 4d3ff03..e4972ff 100644
8448--- a/arch/x86/boot/cpucheck.c
8449+++ b/arch/x86/boot/cpucheck.c
8450@@ -74,7 +74,7 @@ static int has_fpu(void)
8451 u16 fcw = -1, fsw = -1;
8452 u32 cr0;
8453
8454- asm("movl %%cr0,%0" : "=r" (cr0));
8455+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8456 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8457 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8458 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8459@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8460 {
8461 u32 f0, f1;
8462
8463- asm("pushfl ; "
8464+ asm volatile("pushfl ; "
8465 "pushfl ; "
8466 "popl %0 ; "
8467 "movl %0,%1 ; "
8468@@ -115,7 +115,7 @@ static void get_flags(void)
8469 set_bit(X86_FEATURE_FPU, cpu.flags);
8470
8471 if (has_eflag(X86_EFLAGS_ID)) {
8472- asm("cpuid"
8473+ asm volatile("cpuid"
8474 : "=a" (max_intel_level),
8475 "=b" (cpu_vendor[0]),
8476 "=d" (cpu_vendor[1]),
8477@@ -124,7 +124,7 @@ static void get_flags(void)
8478
8479 if (max_intel_level >= 0x00000001 &&
8480 max_intel_level <= 0x0000ffff) {
8481- asm("cpuid"
8482+ asm volatile("cpuid"
8483 : "=a" (tfms),
8484 "=c" (cpu.flags[4]),
8485 "=d" (cpu.flags[0])
8486@@ -136,7 +136,7 @@ static void get_flags(void)
8487 cpu.model += ((tfms >> 16) & 0xf) << 4;
8488 }
8489
8490- asm("cpuid"
8491+ asm volatile("cpuid"
8492 : "=a" (max_amd_level)
8493 : "a" (0x80000000)
8494 : "ebx", "ecx", "edx");
8495@@ -144,7 +144,7 @@ static void get_flags(void)
8496 if (max_amd_level >= 0x80000001 &&
8497 max_amd_level <= 0x8000ffff) {
8498 u32 eax = 0x80000001;
8499- asm("cpuid"
8500+ asm volatile("cpuid"
8501 : "+a" (eax),
8502 "=c" (cpu.flags[6]),
8503 "=d" (cpu.flags[1])
8504@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8505 u32 ecx = MSR_K7_HWCR;
8506 u32 eax, edx;
8507
8508- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8509+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8510 eax &= ~(1 << 15);
8511- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8512+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8513
8514 get_flags(); /* Make sure it really did something */
8515 err = check_flags();
8516@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8517 u32 ecx = MSR_VIA_FCR;
8518 u32 eax, edx;
8519
8520- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8521+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8522 eax |= (1<<1)|(1<<7);
8523- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8524+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8525
8526 set_bit(X86_FEATURE_CX8, cpu.flags);
8527 err = check_flags();
8528@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8529 u32 eax, edx;
8530 u32 level = 1;
8531
8532- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8533- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8534- asm("cpuid"
8535+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8536+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8537+ asm volatile("cpuid"
8538 : "+a" (level), "=d" (cpu.flags[0])
8539 : : "ecx", "ebx");
8540- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8541+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8542
8543 err = check_flags();
8544 }
8545diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8546index b31cc54..8d69237 100644
8547--- a/arch/x86/boot/header.S
8548+++ b/arch/x86/boot/header.S
8549@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8550 # single linked list of
8551 # struct setup_data
8552
8553-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8554+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8555
8556 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8557 #define VO_INIT_SIZE (VO__end - VO__text)
8558diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8559index cae3feb..ff8ff2a 100644
8560--- a/arch/x86/boot/memory.c
8561+++ b/arch/x86/boot/memory.c
8562@@ -19,7 +19,7 @@
8563
8564 static int detect_memory_e820(void)
8565 {
8566- int count = 0;
8567+ unsigned int count = 0;
8568 struct biosregs ireg, oreg;
8569 struct e820entry *desc = boot_params.e820_map;
8570 static struct e820entry buf; /* static so it is zeroed */
8571diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8572index 11e8c6e..fdbb1ed 100644
8573--- a/arch/x86/boot/video-vesa.c
8574+++ b/arch/x86/boot/video-vesa.c
8575@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8576
8577 boot_params.screen_info.vesapm_seg = oreg.es;
8578 boot_params.screen_info.vesapm_off = oreg.di;
8579+ boot_params.screen_info.vesapm_size = oreg.cx;
8580 }
8581
8582 /*
8583diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8584index d42da38..787cdf3 100644
8585--- a/arch/x86/boot/video.c
8586+++ b/arch/x86/boot/video.c
8587@@ -90,7 +90,7 @@ static void store_mode_params(void)
8588 static unsigned int get_entry(void)
8589 {
8590 char entry_buf[4];
8591- int i, len = 0;
8592+ unsigned int i, len = 0;
8593 int key;
8594 unsigned int v;
8595
8596diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8597index 5b577d5..3c1fed4 100644
8598--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8599+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8600@@ -8,6 +8,8 @@
8601 * including this sentence is retained in full.
8602 */
8603
8604+#include <asm/alternative-asm.h>
8605+
8606 .extern crypto_ft_tab
8607 .extern crypto_it_tab
8608 .extern crypto_fl_tab
8609@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8610 je B192; \
8611 leaq 32(r9),r9;
8612
8613+#define ret pax_force_retaddr 0, 1; ret
8614+
8615 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8616 movq r1,r2; \
8617 movq r3,r4; \
8618diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8619index eb0566e..e3ebad8 100644
8620--- a/arch/x86/crypto/aesni-intel_asm.S
8621+++ b/arch/x86/crypto/aesni-intel_asm.S
8622@@ -16,6 +16,7 @@
8623 */
8624
8625 #include <linux/linkage.h>
8626+#include <asm/alternative-asm.h>
8627
8628 .text
8629
8630@@ -52,6 +53,7 @@ _key_expansion_256a:
8631 pxor %xmm1, %xmm0
8632 movaps %xmm0, (%rcx)
8633 add $0x10, %rcx
8634+ pax_force_retaddr_bts
8635 ret
8636
8637 _key_expansion_192a:
8638@@ -75,6 +77,7 @@ _key_expansion_192a:
8639 shufps $0b01001110, %xmm2, %xmm1
8640 movaps %xmm1, 16(%rcx)
8641 add $0x20, %rcx
8642+ pax_force_retaddr_bts
8643 ret
8644
8645 _key_expansion_192b:
8646@@ -93,6 +96,7 @@ _key_expansion_192b:
8647
8648 movaps %xmm0, (%rcx)
8649 add $0x10, %rcx
8650+ pax_force_retaddr_bts
8651 ret
8652
8653 _key_expansion_256b:
8654@@ -104,6 +108,7 @@ _key_expansion_256b:
8655 pxor %xmm1, %xmm2
8656 movaps %xmm2, (%rcx)
8657 add $0x10, %rcx
8658+ pax_force_retaddr_bts
8659 ret
8660
8661 /*
8662@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
8663 cmp %rcx, %rdi
8664 jb .Ldec_key_loop
8665 xor %rax, %rax
8666+ pax_force_retaddr 0, 1
8667 ret
8668+ENDPROC(aesni_set_key)
8669
8670 /*
8671 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8672@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
8673 movups (INP), STATE # input
8674 call _aesni_enc1
8675 movups STATE, (OUTP) # output
8676+ pax_force_retaddr 0, 1
8677 ret
8678+ENDPROC(aesni_enc)
8679
8680 /*
8681 * _aesni_enc1: internal ABI
8682@@ -319,6 +328,7 @@ _aesni_enc1:
8683 movaps 0x70(TKEYP), KEY
8684 # aesenclast KEY, STATE # last round
8685 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
8686+ pax_force_retaddr_bts
8687 ret
8688
8689 /*
8690@@ -482,6 +492,7 @@ _aesni_enc4:
8691 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
8692 # aesenclast KEY, STATE4
8693 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
8694+ pax_force_retaddr_bts
8695 ret
8696
8697 /*
8698@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
8699 movups (INP), STATE # input
8700 call _aesni_dec1
8701 movups STATE, (OUTP) #output
8702+ pax_force_retaddr 0, 1
8703 ret
8704+ENDPROC(aesni_dec)
8705
8706 /*
8707 * _aesni_dec1: internal ABI
8708@@ -563,6 +576,7 @@ _aesni_dec1:
8709 movaps 0x70(TKEYP), KEY
8710 # aesdeclast KEY, STATE # last round
8711 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
8712+ pax_force_retaddr_bts
8713 ret
8714
8715 /*
8716@@ -726,6 +740,7 @@ _aesni_dec4:
8717 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
8718 # aesdeclast KEY, STATE4
8719 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
8720+ pax_force_retaddr_bts
8721 ret
8722
8723 /*
8724@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
8725 cmp $16, LEN
8726 jge .Lecb_enc_loop1
8727 .Lecb_enc_ret:
8728+ pax_force_retaddr 0, 1
8729 ret
8730+ENDPROC(aesni_ecb_enc)
8731
8732 /*
8733 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8734@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
8735 cmp $16, LEN
8736 jge .Lecb_dec_loop1
8737 .Lecb_dec_ret:
8738+ pax_force_retaddr 0, 1
8739 ret
8740+ENDPROC(aesni_ecb_dec)
8741
8742 /*
8743 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8744@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
8745 jge .Lcbc_enc_loop
8746 movups STATE, (IVP)
8747 .Lcbc_enc_ret:
8748+ pax_force_retaddr 0, 1
8749 ret
8750+ENDPROC(aesni_cbc_enc)
8751
8752 /*
8753 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8754@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
8755 .Lcbc_dec_ret:
8756 movups IV, (IVP)
8757 .Lcbc_dec_just_ret:
8758+ pax_force_retaddr 0, 1
8759 ret
8760+ENDPROC(aesni_cbc_dec)
8761diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8762index 6214a9b..1f4fc9a 100644
8763--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8764+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8765@@ -1,3 +1,5 @@
8766+#include <asm/alternative-asm.h>
8767+
8768 # enter ECRYPT_encrypt_bytes
8769 .text
8770 .p2align 5
8771@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8772 add %r11,%rsp
8773 mov %rdi,%rax
8774 mov %rsi,%rdx
8775+ pax_force_retaddr 0, 1
8776 ret
8777 # bytesatleast65:
8778 ._bytesatleast65:
8779@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8780 add %r11,%rsp
8781 mov %rdi,%rax
8782 mov %rsi,%rdx
8783+ pax_force_retaddr
8784 ret
8785 # enter ECRYPT_ivsetup
8786 .text
8787@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8788 add %r11,%rsp
8789 mov %rdi,%rax
8790 mov %rsi,%rdx
8791+ pax_force_retaddr
8792 ret
8793diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8794index 35974a5..5662ae2 100644
8795--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8796+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8797@@ -21,6 +21,7 @@
8798 .text
8799
8800 #include <asm/asm-offsets.h>
8801+#include <asm/alternative-asm.h>
8802
8803 #define a_offset 0
8804 #define b_offset 4
8805@@ -269,6 +270,7 @@ twofish_enc_blk:
8806
8807 popq R1
8808 movq $1,%rax
8809+ pax_force_retaddr 0, 1
8810 ret
8811
8812 twofish_dec_blk:
8813@@ -321,4 +323,5 @@ twofish_dec_blk:
8814
8815 popq R1
8816 movq $1,%rax
8817+ pax_force_retaddr 0, 1
8818 ret
8819diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8820index 14531ab..bc68a7b 100644
8821--- a/arch/x86/ia32/ia32_aout.c
8822+++ b/arch/x86/ia32/ia32_aout.c
8823@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8824 unsigned long dump_start, dump_size;
8825 struct user32 dump;
8826
8827+ memset(&dump, 0, sizeof(dump));
8828+
8829 fs = get_fs();
8830 set_fs(KERNEL_DS);
8831 has_dumped = 1;
8832@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8833 dump_size = dump.u_ssize << PAGE_SHIFT;
8834 DUMP_WRITE(dump_start, dump_size);
8835 }
8836- /*
8837- * Finally dump the task struct. Not be used by gdb, but
8838- * could be useful
8839- */
8840- set_fs(KERNEL_DS);
8841- DUMP_WRITE(current, sizeof(*current));
8842 end_coredump:
8843 set_fs(fs);
8844 return has_dumped;
8845@@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8846 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8847 current->mm->cached_hole_size = 0;
8848
8849+ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8850+ if (retval < 0) {
8851+ /* Someone check-me: is this error path enough? */
8852+ send_sig(SIGKILL, current, 0);
8853+ return retval;
8854+ }
8855+
8856 install_exec_creds(bprm);
8857 current->flags &= ~PF_FORKNOEXEC;
8858
8859@@ -422,13 +425,6 @@ beyond_if:
8860
8861 set_brk(current->mm->start_brk, current->mm->brk);
8862
8863- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8864- if (retval < 0) {
8865- /* Someone check-me: is this error path enough? */
8866- send_sig(SIGKILL, current, 0);
8867- return retval;
8868- }
8869-
8870 current->mm->start_stack =
8871 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8872 /* start thread */
8873diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8874index 588a7aa..a3468b0 100644
8875--- a/arch/x86/ia32/ia32_signal.c
8876+++ b/arch/x86/ia32/ia32_signal.c
8877@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8878 }
8879 seg = get_fs();
8880 set_fs(KERNEL_DS);
8881- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8882+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8883 set_fs(seg);
8884 if (ret >= 0 && uoss_ptr) {
8885 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8886@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8887 */
8888 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8889 size_t frame_size,
8890- void **fpstate)
8891+ void __user **fpstate)
8892 {
8893 unsigned long sp;
8894
8895@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8896
8897 if (used_math()) {
8898 sp = sp - sig_xstate_ia32_size;
8899- *fpstate = (struct _fpstate_ia32 *) sp;
8900+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8901 if (save_i387_xstate_ia32(*fpstate) < 0)
8902 return (void __user *) -1L;
8903 }
8904@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8905 sp -= frame_size;
8906 /* Align the stack pointer according to the i386 ABI,
8907 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8908- sp = ((sp + 4) & -16ul) - 4;
8909+ sp = ((sp - 12) & -16ul) - 4;
8910 return (void __user *) sp;
8911 }
8912
8913@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8914 * These are actually not used anymore, but left because some
8915 * gdb versions depend on them as a marker.
8916 */
8917- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8918+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8919 } put_user_catch(err);
8920
8921 if (err)
8922@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8923 0xb8,
8924 __NR_ia32_rt_sigreturn,
8925 0x80cd,
8926- 0,
8927+ 0
8928 };
8929
8930 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8931@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8932
8933 if (ka->sa.sa_flags & SA_RESTORER)
8934 restorer = ka->sa.sa_restorer;
8935+ else if (current->mm->context.vdso)
8936+ /* Return stub is in 32bit vsyscall page */
8937+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8938 else
8939- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8940- rt_sigreturn);
8941+ restorer = &frame->retcode;
8942 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8943
8944 /*
8945 * Not actually used anymore, but left because some gdb
8946 * versions need it.
8947 */
8948- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8949+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8950 } put_user_catch(err);
8951
8952 if (err)
8953diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8954index 4edd8eb..29124b4 100644
8955--- a/arch/x86/ia32/ia32entry.S
8956+++ b/arch/x86/ia32/ia32entry.S
8957@@ -13,7 +13,9 @@
8958 #include <asm/thread_info.h>
8959 #include <asm/segment.h>
8960 #include <asm/irqflags.h>
8961+#include <asm/pgtable.h>
8962 #include <linux/linkage.h>
8963+#include <asm/alternative-asm.h>
8964
8965 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8966 #include <linux/elf-em.h>
8967@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8968 ENDPROC(native_irq_enable_sysexit)
8969 #endif
8970
8971+ .macro pax_enter_kernel_user
8972+ pax_set_fptr_mask
8973+#ifdef CONFIG_PAX_MEMORY_UDEREF
8974+ call pax_enter_kernel_user
8975+#endif
8976+ .endm
8977+
8978+ .macro pax_exit_kernel_user
8979+#ifdef CONFIG_PAX_MEMORY_UDEREF
8980+ call pax_exit_kernel_user
8981+#endif
8982+#ifdef CONFIG_PAX_RANDKSTACK
8983+ pushq %rax
8984+ pushq %r11
8985+ call pax_randomize_kstack
8986+ popq %r11
8987+ popq %rax
8988+#endif
8989+ .endm
8990+
8991+.macro pax_erase_kstack
8992+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8993+ call pax_erase_kstack
8994+#endif
8995+.endm
8996+
8997 /*
8998 * 32bit SYSENTER instruction entry.
8999 *
9000@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
9001 CFI_REGISTER rsp,rbp
9002 SWAPGS_UNSAFE_STACK
9003 movq PER_CPU_VAR(kernel_stack), %rsp
9004- addq $(KERNEL_STACK_OFFSET),%rsp
9005- /*
9006- * No need to follow this irqs on/off section: the syscall
9007- * disabled irqs, here we enable it straight after entry:
9008- */
9009- ENABLE_INTERRUPTS(CLBR_NONE)
9010 movl %ebp,%ebp /* zero extension */
9011 pushq $__USER32_DS
9012 CFI_ADJUST_CFA_OFFSET 8
9013@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
9014 pushfq
9015 CFI_ADJUST_CFA_OFFSET 8
9016 /*CFI_REL_OFFSET rflags,0*/
9017- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
9018- CFI_REGISTER rip,r10
9019+ orl $X86_EFLAGS_IF,(%rsp)
9020+ GET_THREAD_INFO(%r11)
9021+ movl TI_sysenter_return(%r11), %r11d
9022+ CFI_REGISTER rip,r11
9023 pushq $__USER32_CS
9024 CFI_ADJUST_CFA_OFFSET 8
9025 /*CFI_REL_OFFSET cs,0*/
9026 movl %eax, %eax
9027- pushq %r10
9028+ pushq %r11
9029 CFI_ADJUST_CFA_OFFSET 8
9030 CFI_REL_OFFSET rip,0
9031 pushq %rax
9032 CFI_ADJUST_CFA_OFFSET 8
9033 cld
9034 SAVE_ARGS 0,0,1
9035+ pax_enter_kernel_user
9036+ /*
9037+ * No need to follow this irqs on/off section: the syscall
9038+ * disabled irqs, here we enable it straight after entry:
9039+ */
9040+ ENABLE_INTERRUPTS(CLBR_NONE)
9041 /* no need to do an access_ok check here because rbp has been
9042 32bit zero extended */
9043+
9044+#ifdef CONFIG_PAX_MEMORY_UDEREF
9045+ mov $PAX_USER_SHADOW_BASE,%r11
9046+ add %r11,%rbp
9047+#endif
9048+
9049 1: movl (%rbp),%ebp
9050 .section __ex_table,"a"
9051 .quad 1b,ia32_badarg
9052 .previous
9053- GET_THREAD_INFO(%r10)
9054- orl $TS_COMPAT,TI_status(%r10)
9055- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9056+ GET_THREAD_INFO(%r11)
9057+ orl $TS_COMPAT,TI_status(%r11)
9058+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9059 CFI_REMEMBER_STATE
9060 jnz sysenter_tracesys
9061 cmpq $(IA32_NR_syscalls-1),%rax
9062@@ -166,13 +202,15 @@ sysenter_do_call:
9063 sysenter_dispatch:
9064 call *ia32_sys_call_table(,%rax,8)
9065 movq %rax,RAX-ARGOFFSET(%rsp)
9066- GET_THREAD_INFO(%r10)
9067+ GET_THREAD_INFO(%r11)
9068 DISABLE_INTERRUPTS(CLBR_NONE)
9069 TRACE_IRQS_OFF
9070- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9071+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9072 jnz sysexit_audit
9073 sysexit_from_sys_call:
9074- andl $~TS_COMPAT,TI_status(%r10)
9075+ pax_exit_kernel_user
9076+ pax_erase_kstack
9077+ andl $~TS_COMPAT,TI_status(%r11)
9078 /* clear IF, that popfq doesn't enable interrupts early */
9079 andl $~0x200,EFLAGS-R11(%rsp)
9080 movl RIP-R11(%rsp),%edx /* User %eip */
9081@@ -200,6 +238,9 @@ sysexit_from_sys_call:
9082 movl %eax,%esi /* 2nd arg: syscall number */
9083 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9084 call audit_syscall_entry
9085+
9086+ pax_erase_kstack
9087+
9088 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9089 cmpq $(IA32_NR_syscalls-1),%rax
9090 ja ia32_badsys
9091@@ -211,7 +252,7 @@ sysexit_from_sys_call:
9092 .endm
9093
9094 .macro auditsys_exit exit
9095- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9096+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9097 jnz ia32_ret_from_sys_call
9098 TRACE_IRQS_ON
9099 sti
9100@@ -221,12 +262,12 @@ sysexit_from_sys_call:
9101 movzbl %al,%edi /* zero-extend that into %edi */
9102 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
9103 call audit_syscall_exit
9104- GET_THREAD_INFO(%r10)
9105+ GET_THREAD_INFO(%r11)
9106 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
9107 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9108 cli
9109 TRACE_IRQS_OFF
9110- testl %edi,TI_flags(%r10)
9111+ testl %edi,TI_flags(%r11)
9112 jz \exit
9113 CLEAR_RREGS -ARGOFFSET
9114 jmp int_with_check
9115@@ -244,7 +285,7 @@ sysexit_audit:
9116
9117 sysenter_tracesys:
9118 #ifdef CONFIG_AUDITSYSCALL
9119- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9120+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9121 jz sysenter_auditsys
9122 #endif
9123 SAVE_REST
9124@@ -252,6 +293,9 @@ sysenter_tracesys:
9125 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
9126 movq %rsp,%rdi /* &pt_regs -> arg1 */
9127 call syscall_trace_enter
9128+
9129+ pax_erase_kstack
9130+
9131 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9132 RESTORE_REST
9133 cmpq $(IA32_NR_syscalls-1),%rax
9134@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
9135 ENTRY(ia32_cstar_target)
9136 CFI_STARTPROC32 simple
9137 CFI_SIGNAL_FRAME
9138- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9139+ CFI_DEF_CFA rsp,0
9140 CFI_REGISTER rip,rcx
9141 /*CFI_REGISTER rflags,r11*/
9142 SWAPGS_UNSAFE_STACK
9143 movl %esp,%r8d
9144 CFI_REGISTER rsp,r8
9145 movq PER_CPU_VAR(kernel_stack),%rsp
9146+ SAVE_ARGS 8*6,1,1
9147+ pax_enter_kernel_user
9148 /*
9149 * No need to follow this irqs on/off section: the syscall
9150 * disabled irqs and here we enable it straight after entry:
9151 */
9152 ENABLE_INTERRUPTS(CLBR_NONE)
9153- SAVE_ARGS 8,1,1
9154 movl %eax,%eax /* zero extension */
9155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9156 movq %rcx,RIP-ARGOFFSET(%rsp)
9157@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
9158 /* no need to do an access_ok check here because r8 has been
9159 32bit zero extended */
9160 /* hardware stack frame is complete now */
9161+
9162+#ifdef CONFIG_PAX_MEMORY_UDEREF
9163+ mov $PAX_USER_SHADOW_BASE,%r11
9164+ add %r11,%r8
9165+#endif
9166+
9167 1: movl (%r8),%r9d
9168 .section __ex_table,"a"
9169 .quad 1b,ia32_badarg
9170 .previous
9171- GET_THREAD_INFO(%r10)
9172- orl $TS_COMPAT,TI_status(%r10)
9173- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9174+ GET_THREAD_INFO(%r11)
9175+ orl $TS_COMPAT,TI_status(%r11)
9176+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9177 CFI_REMEMBER_STATE
9178 jnz cstar_tracesys
9179 cmpq $IA32_NR_syscalls-1,%rax
9180@@ -327,13 +378,15 @@ cstar_do_call:
9181 cstar_dispatch:
9182 call *ia32_sys_call_table(,%rax,8)
9183 movq %rax,RAX-ARGOFFSET(%rsp)
9184- GET_THREAD_INFO(%r10)
9185+ GET_THREAD_INFO(%r11)
9186 DISABLE_INTERRUPTS(CLBR_NONE)
9187 TRACE_IRQS_OFF
9188- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9189+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9190 jnz sysretl_audit
9191 sysretl_from_sys_call:
9192- andl $~TS_COMPAT,TI_status(%r10)
9193+ pax_exit_kernel_user
9194+ pax_erase_kstack
9195+ andl $~TS_COMPAT,TI_status(%r11)
9196 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
9197 movl RIP-ARGOFFSET(%rsp),%ecx
9198 CFI_REGISTER rip,rcx
9199@@ -361,7 +414,7 @@ sysretl_audit:
9200
9201 cstar_tracesys:
9202 #ifdef CONFIG_AUDITSYSCALL
9203- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9204+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9205 jz cstar_auditsys
9206 #endif
9207 xchgl %r9d,%ebp
9208@@ -370,6 +423,9 @@ cstar_tracesys:
9209 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9210 movq %rsp,%rdi /* &pt_regs -> arg1 */
9211 call syscall_trace_enter
9212+
9213+ pax_erase_kstack
9214+
9215 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9216 RESTORE_REST
9217 xchgl %ebp,%r9d
9218@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
9219 CFI_REL_OFFSET rip,RIP-RIP
9220 PARAVIRT_ADJUST_EXCEPTION_FRAME
9221 SWAPGS
9222- /*
9223- * No need to follow this irqs on/off section: the syscall
9224- * disabled irqs and here we enable it straight after entry:
9225- */
9226- ENABLE_INTERRUPTS(CLBR_NONE)
9227 movl %eax,%eax
9228 pushq %rax
9229 CFI_ADJUST_CFA_OFFSET 8
9230@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
9231 /* note the registers are not zero extended to the sf.
9232 this could be a problem. */
9233 SAVE_ARGS 0,0,1
9234- GET_THREAD_INFO(%r10)
9235- orl $TS_COMPAT,TI_status(%r10)
9236- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9237+ pax_enter_kernel_user
9238+ /*
9239+ * No need to follow this irqs on/off section: the syscall
9240+ * disabled irqs and here we enable it straight after entry:
9241+ */
9242+ ENABLE_INTERRUPTS(CLBR_NONE)
9243+ GET_THREAD_INFO(%r11)
9244+ orl $TS_COMPAT,TI_status(%r11)
9245+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9246 jnz ia32_tracesys
9247 cmpq $(IA32_NR_syscalls-1),%rax
9248 ja ia32_badsys
9249@@ -448,6 +505,9 @@ ia32_tracesys:
9250 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9251 movq %rsp,%rdi /* &pt_regs -> arg1 */
9252 call syscall_trace_enter
9253+
9254+ pax_erase_kstack
9255+
9256 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9257 RESTORE_REST
9258 cmpq $(IA32_NR_syscalls-1),%rax
9259@@ -462,6 +522,7 @@ ia32_badsys:
9260
9261 quiet_ni_syscall:
9262 movq $-ENOSYS,%rax
9263+ pax_force_retaddr
9264 ret
9265 CFI_ENDPROC
9266
9267diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9268index 016218c..47ccbdd 100644
9269--- a/arch/x86/ia32/sys_ia32.c
9270+++ b/arch/x86/ia32/sys_ia32.c
9271@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9272 */
9273 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9274 {
9275- typeof(ubuf->st_uid) uid = 0;
9276- typeof(ubuf->st_gid) gid = 0;
9277+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9278+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9279 SET_UID(uid, stat->uid);
9280 SET_GID(gid, stat->gid);
9281 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9282@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9283 }
9284 set_fs(KERNEL_DS);
9285 ret = sys_rt_sigprocmask(how,
9286- set ? (sigset_t __user *)&s : NULL,
9287- oset ? (sigset_t __user *)&s : NULL,
9288+ set ? (sigset_t __force_user *)&s : NULL,
9289+ oset ? (sigset_t __force_user *)&s : NULL,
9290 sigsetsize);
9291 set_fs(old_fs);
9292 if (ret)
9293@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9294 mm_segment_t old_fs = get_fs();
9295
9296 set_fs(KERNEL_DS);
9297- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9298+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9299 set_fs(old_fs);
9300 if (put_compat_timespec(&t, interval))
9301 return -EFAULT;
9302@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9303 mm_segment_t old_fs = get_fs();
9304
9305 set_fs(KERNEL_DS);
9306- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9307+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9308 set_fs(old_fs);
9309 if (!ret) {
9310 switch (_NSIG_WORDS) {
9311@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9312 if (copy_siginfo_from_user32(&info, uinfo))
9313 return -EFAULT;
9314 set_fs(KERNEL_DS);
9315- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9316+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9317 set_fs(old_fs);
9318 return ret;
9319 }
9320@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9321 return -EFAULT;
9322
9323 set_fs(KERNEL_DS);
9324- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9325+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9326 count);
9327 set_fs(old_fs);
9328
9329diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9330index e2077d3..17d07ad 100644
9331--- a/arch/x86/include/asm/alternative-asm.h
9332+++ b/arch/x86/include/asm/alternative-asm.h
9333@@ -8,10 +8,10 @@
9334
9335 #ifdef CONFIG_SMP
9336 .macro LOCK_PREFIX
9337-1: lock
9338+672: lock
9339 .section .smp_locks,"a"
9340 .align 4
9341- X86_ALIGN 1b
9342+ X86_ALIGN 672b
9343 .previous
9344 .endm
9345 #else
9346@@ -19,4 +19,43 @@
9347 .endm
9348 #endif
9349
9350+#ifdef KERNEXEC_PLUGIN
9351+ .macro pax_force_retaddr_bts rip=0
9352+ btsq $63,\rip(%rsp)
9353+ .endm
9354+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9355+ .macro pax_force_retaddr rip=0, reload=0
9356+ btsq $63,\rip(%rsp)
9357+ .endm
9358+ .macro pax_force_fptr ptr
9359+ btsq $63,\ptr
9360+ .endm
9361+ .macro pax_set_fptr_mask
9362+ .endm
9363+#endif
9364+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9365+ .macro pax_force_retaddr rip=0, reload=0
9366+ .if \reload
9367+ pax_set_fptr_mask
9368+ .endif
9369+ orq %r10,\rip(%rsp)
9370+ .endm
9371+ .macro pax_force_fptr ptr
9372+ orq %r10,\ptr
9373+ .endm
9374+ .macro pax_set_fptr_mask
9375+ movabs $0x8000000000000000,%r10
9376+ .endm
9377+#endif
9378+#else
9379+ .macro pax_force_retaddr rip=0, reload=0
9380+ .endm
9381+ .macro pax_force_fptr ptr
9382+ .endm
9383+ .macro pax_force_retaddr_bts rip=0
9384+ .endm
9385+ .macro pax_set_fptr_mask
9386+ .endm
9387+#endif
9388+
9389 #endif /* __ASSEMBLY__ */
9390diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9391index c240efc..fdfadf3 100644
9392--- a/arch/x86/include/asm/alternative.h
9393+++ b/arch/x86/include/asm/alternative.h
9394@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
9395 " .byte 662b-661b\n" /* sourcelen */ \
9396 " .byte 664f-663f\n" /* replacementlen */ \
9397 ".previous\n" \
9398- ".section .altinstr_replacement, \"ax\"\n" \
9399+ ".section .altinstr_replacement, \"a\"\n" \
9400 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9401 ".previous"
9402
9403diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9404index 474d80d..1f97d58 100644
9405--- a/arch/x86/include/asm/apic.h
9406+++ b/arch/x86/include/asm/apic.h
9407@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
9408
9409 #ifdef CONFIG_X86_LOCAL_APIC
9410
9411-extern unsigned int apic_verbosity;
9412+extern int apic_verbosity;
9413 extern int local_apic_timer_c2_ok;
9414
9415 extern int disable_apic;
9416diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9417index 20370c6..a2eb9b0 100644
9418--- a/arch/x86/include/asm/apm.h
9419+++ b/arch/x86/include/asm/apm.h
9420@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9421 __asm__ __volatile__(APM_DO_ZERO_SEGS
9422 "pushl %%edi\n\t"
9423 "pushl %%ebp\n\t"
9424- "lcall *%%cs:apm_bios_entry\n\t"
9425+ "lcall *%%ss:apm_bios_entry\n\t"
9426 "setc %%al\n\t"
9427 "popl %%ebp\n\t"
9428 "popl %%edi\n\t"
9429@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9430 __asm__ __volatile__(APM_DO_ZERO_SEGS
9431 "pushl %%edi\n\t"
9432 "pushl %%ebp\n\t"
9433- "lcall *%%cs:apm_bios_entry\n\t"
9434+ "lcall *%%ss:apm_bios_entry\n\t"
9435 "setc %%bl\n\t"
9436 "popl %%ebp\n\t"
9437 "popl %%edi\n\t"
9438diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
9439index dc5a667..939040c 100644
9440--- a/arch/x86/include/asm/atomic_32.h
9441+++ b/arch/x86/include/asm/atomic_32.h
9442@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
9443 }
9444
9445 /**
9446+ * atomic_read_unchecked - read atomic variable
9447+ * @v: pointer of type atomic_unchecked_t
9448+ *
9449+ * Atomically reads the value of @v.
9450+ */
9451+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9452+{
9453+ return v->counter;
9454+}
9455+
9456+/**
9457 * atomic_set - set atomic variable
9458 * @v: pointer of type atomic_t
9459 * @i: required value
9460@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
9461 }
9462
9463 /**
9464+ * atomic_set_unchecked - set atomic variable
9465+ * @v: pointer of type atomic_unchecked_t
9466+ * @i: required value
9467+ *
9468+ * Atomically sets the value of @v to @i.
9469+ */
9470+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9471+{
9472+ v->counter = i;
9473+}
9474+
9475+/**
9476 * atomic_add - add integer to atomic variable
9477 * @i: integer value to add
9478 * @v: pointer of type atomic_t
9479@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
9480 */
9481 static inline void atomic_add(int i, atomic_t *v)
9482 {
9483- asm volatile(LOCK_PREFIX "addl %1,%0"
9484+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9485+
9486+#ifdef CONFIG_PAX_REFCOUNT
9487+ "jno 0f\n"
9488+ LOCK_PREFIX "subl %1,%0\n"
9489+ "int $4\n0:\n"
9490+ _ASM_EXTABLE(0b, 0b)
9491+#endif
9492+
9493+ : "+m" (v->counter)
9494+ : "ir" (i));
9495+}
9496+
9497+/**
9498+ * atomic_add_unchecked - add integer to atomic variable
9499+ * @i: integer value to add
9500+ * @v: pointer of type atomic_unchecked_t
9501+ *
9502+ * Atomically adds @i to @v.
9503+ */
9504+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9505+{
9506+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9507 : "+m" (v->counter)
9508 : "ir" (i));
9509 }
9510@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
9511 */
9512 static inline void atomic_sub(int i, atomic_t *v)
9513 {
9514- asm volatile(LOCK_PREFIX "subl %1,%0"
9515+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9516+
9517+#ifdef CONFIG_PAX_REFCOUNT
9518+ "jno 0f\n"
9519+ LOCK_PREFIX "addl %1,%0\n"
9520+ "int $4\n0:\n"
9521+ _ASM_EXTABLE(0b, 0b)
9522+#endif
9523+
9524+ : "+m" (v->counter)
9525+ : "ir" (i));
9526+}
9527+
9528+/**
9529+ * atomic_sub_unchecked - subtract integer from atomic variable
9530+ * @i: integer value to subtract
9531+ * @v: pointer of type atomic_unchecked_t
9532+ *
9533+ * Atomically subtracts @i from @v.
9534+ */
9535+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9536+{
9537+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9538 : "+m" (v->counter)
9539 : "ir" (i));
9540 }
9541@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9542 {
9543 unsigned char c;
9544
9545- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9546+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9547+
9548+#ifdef CONFIG_PAX_REFCOUNT
9549+ "jno 0f\n"
9550+ LOCK_PREFIX "addl %2,%0\n"
9551+ "int $4\n0:\n"
9552+ _ASM_EXTABLE(0b, 0b)
9553+#endif
9554+
9555+ "sete %1\n"
9556 : "+m" (v->counter), "=qm" (c)
9557 : "ir" (i) : "memory");
9558 return c;
9559@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9560 */
9561 static inline void atomic_inc(atomic_t *v)
9562 {
9563- asm volatile(LOCK_PREFIX "incl %0"
9564+ asm volatile(LOCK_PREFIX "incl %0\n"
9565+
9566+#ifdef CONFIG_PAX_REFCOUNT
9567+ "jno 0f\n"
9568+ LOCK_PREFIX "decl %0\n"
9569+ "int $4\n0:\n"
9570+ _ASM_EXTABLE(0b, 0b)
9571+#endif
9572+
9573+ : "+m" (v->counter));
9574+}
9575+
9576+/**
9577+ * atomic_inc_unchecked - increment atomic variable
9578+ * @v: pointer of type atomic_unchecked_t
9579+ *
9580+ * Atomically increments @v by 1.
9581+ */
9582+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9583+{
9584+ asm volatile(LOCK_PREFIX "incl %0\n"
9585 : "+m" (v->counter));
9586 }
9587
9588@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
9589 */
9590 static inline void atomic_dec(atomic_t *v)
9591 {
9592- asm volatile(LOCK_PREFIX "decl %0"
9593+ asm volatile(LOCK_PREFIX "decl %0\n"
9594+
9595+#ifdef CONFIG_PAX_REFCOUNT
9596+ "jno 0f\n"
9597+ LOCK_PREFIX "incl %0\n"
9598+ "int $4\n0:\n"
9599+ _ASM_EXTABLE(0b, 0b)
9600+#endif
9601+
9602+ : "+m" (v->counter));
9603+}
9604+
9605+/**
9606+ * atomic_dec_unchecked - decrement atomic variable
9607+ * @v: pointer of type atomic_unchecked_t
9608+ *
9609+ * Atomically decrements @v by 1.
9610+ */
9611+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9612+{
9613+ asm volatile(LOCK_PREFIX "decl %0\n"
9614 : "+m" (v->counter));
9615 }
9616
9617@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9618 {
9619 unsigned char c;
9620
9621- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9622+ asm volatile(LOCK_PREFIX "decl %0\n"
9623+
9624+#ifdef CONFIG_PAX_REFCOUNT
9625+ "jno 0f\n"
9626+ LOCK_PREFIX "incl %0\n"
9627+ "int $4\n0:\n"
9628+ _ASM_EXTABLE(0b, 0b)
9629+#endif
9630+
9631+ "sete %1\n"
9632 : "+m" (v->counter), "=qm" (c)
9633 : : "memory");
9634 return c != 0;
9635@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9636 {
9637 unsigned char c;
9638
9639- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9640+ asm volatile(LOCK_PREFIX "incl %0\n"
9641+
9642+#ifdef CONFIG_PAX_REFCOUNT
9643+ "jno 0f\n"
9644+ LOCK_PREFIX "decl %0\n"
9645+ "into\n0:\n"
9646+ _ASM_EXTABLE(0b, 0b)
9647+#endif
9648+
9649+ "sete %1\n"
9650+ : "+m" (v->counter), "=qm" (c)
9651+ : : "memory");
9652+ return c != 0;
9653+}
9654+
9655+/**
9656+ * atomic_inc_and_test_unchecked - increment and test
9657+ * @v: pointer of type atomic_unchecked_t
9658+ *
9659+ * Atomically increments @v by 1
9660+ * and returns true if the result is zero, or false for all
9661+ * other cases.
9662+ */
9663+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9664+{
9665+ unsigned char c;
9666+
9667+ asm volatile(LOCK_PREFIX "incl %0\n"
9668+ "sete %1\n"
9669 : "+m" (v->counter), "=qm" (c)
9670 : : "memory");
9671 return c != 0;
9672@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9673 {
9674 unsigned char c;
9675
9676- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9677+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9678+
9679+#ifdef CONFIG_PAX_REFCOUNT
9680+ "jno 0f\n"
9681+ LOCK_PREFIX "subl %2,%0\n"
9682+ "int $4\n0:\n"
9683+ _ASM_EXTABLE(0b, 0b)
9684+#endif
9685+
9686+ "sets %1\n"
9687 : "+m" (v->counter), "=qm" (c)
9688 : "ir" (i) : "memory");
9689 return c;
9690@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
9691 #endif
9692 /* Modern 486+ processor */
9693 __i = i;
9694- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9695+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9696+
9697+#ifdef CONFIG_PAX_REFCOUNT
9698+ "jno 0f\n"
9699+ "movl %0, %1\n"
9700+ "int $4\n0:\n"
9701+ _ASM_EXTABLE(0b, 0b)
9702+#endif
9703+
9704 : "+r" (i), "+m" (v->counter)
9705 : : "memory");
9706 return i + __i;
9707@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
9708 }
9709
9710 /**
9711+ * atomic_add_return_unchecked - add integer and return
9712+ * @v: pointer of type atomic_unchecked_t
9713+ * @i: integer value to add
9714+ *
9715+ * Atomically adds @i to @v and returns @i + @v
9716+ */
9717+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9718+{
9719+ int __i;
9720+#ifdef CONFIG_M386
9721+ unsigned long flags;
9722+ if (unlikely(boot_cpu_data.x86 <= 3))
9723+ goto no_xadd;
9724+#endif
9725+ /* Modern 486+ processor */
9726+ __i = i;
9727+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
9728+ : "+r" (i), "+m" (v->counter)
9729+ : : "memory");
9730+ return i + __i;
9731+
9732+#ifdef CONFIG_M386
9733+no_xadd: /* Legacy 386 processor */
9734+ local_irq_save(flags);
9735+ __i = atomic_read_unchecked(v);
9736+ atomic_set_unchecked(v, i + __i);
9737+ local_irq_restore(flags);
9738+ return i + __i;
9739+#endif
9740+}
9741+
9742+/**
9743 * atomic_sub_return - subtract integer and return
9744 * @v: pointer of type atomic_t
9745 * @i: integer value to subtract
9746@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9747 return cmpxchg(&v->counter, old, new);
9748 }
9749
9750+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9751+{
9752+ return cmpxchg(&v->counter, old, new);
9753+}
9754+
9755 static inline int atomic_xchg(atomic_t *v, int new)
9756 {
9757 return xchg(&v->counter, new);
9758 }
9759
9760+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9761+{
9762+ return xchg(&v->counter, new);
9763+}
9764+
9765 /**
9766 * atomic_add_unless - add unless the number is already a given value
9767 * @v: pointer of type atomic_t
9768@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
9769 */
9770 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9771 {
9772- int c, old;
9773+ int c, old, new;
9774 c = atomic_read(v);
9775 for (;;) {
9776- if (unlikely(c == (u)))
9777+ if (unlikely(c == u))
9778 break;
9779- old = atomic_cmpxchg((v), c, c + (a));
9780+
9781+ asm volatile("addl %2,%0\n"
9782+
9783+#ifdef CONFIG_PAX_REFCOUNT
9784+ "jno 0f\n"
9785+ "subl %2,%0\n"
9786+ "int $4\n0:\n"
9787+ _ASM_EXTABLE(0b, 0b)
9788+#endif
9789+
9790+ : "=r" (new)
9791+ : "0" (c), "ir" (a));
9792+
9793+ old = atomic_cmpxchg(v, c, new);
9794 if (likely(old == c))
9795 break;
9796 c = old;
9797 }
9798- return c != (u);
9799+ return c != u;
9800 }
9801
9802 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9803
9804 #define atomic_inc_return(v) (atomic_add_return(1, v))
9805+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9806+{
9807+ return atomic_add_return_unchecked(1, v);
9808+}
9809 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9810
9811 /* These are x86-specific, used by some header files */
9812@@ -266,9 +495,18 @@ typedef struct {
9813 u64 __aligned(8) counter;
9814 } atomic64_t;
9815
9816+#ifdef CONFIG_PAX_REFCOUNT
9817+typedef struct {
9818+ u64 __aligned(8) counter;
9819+} atomic64_unchecked_t;
9820+#else
9821+typedef atomic64_t atomic64_unchecked_t;
9822+#endif
9823+
9824 #define ATOMIC64_INIT(val) { (val) }
9825
9826 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9827+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
9828
9829 /**
9830 * atomic64_xchg - xchg atomic64 variable
9831@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9832 * the old value.
9833 */
9834 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9835+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9836
9837 /**
9838 * atomic64_set - set atomic64 variable
9839@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9840 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
9841
9842 /**
9843+ * atomic64_unchecked_set - set atomic64 variable
9844+ * @ptr: pointer to type atomic64_unchecked_t
9845+ * @new_val: value to assign
9846+ *
9847+ * Atomically sets the value of @ptr to @new_val.
9848+ */
9849+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9850+
9851+/**
9852 * atomic64_read - read atomic64 variable
9853 * @ptr: pointer to type atomic64_t
9854 *
9855@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9856 return res;
9857 }
9858
9859-extern u64 atomic64_read(atomic64_t *ptr);
9860+/**
9861+ * atomic64_read_unchecked - read atomic64 variable
9862+ * @ptr: pointer to type atomic64_unchecked_t
9863+ *
9864+ * Atomically reads the value of @ptr and returns it.
9865+ */
9866+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9867+{
9868+ u64 res;
9869+
9870+ /*
9871+ * Note, we inline this atomic64_unchecked_t primitive because
9872+ * it only clobbers EAX/EDX and leaves the others
9873+ * untouched. We also (somewhat subtly) rely on the
9874+ * fact that cmpxchg8b returns the current 64-bit value
9875+ * of the memory location we are touching:
9876+ */
9877+ asm volatile(
9878+ "mov %%ebx, %%eax\n\t"
9879+ "mov %%ecx, %%edx\n\t"
9880+ LOCK_PREFIX "cmpxchg8b %1\n"
9881+ : "=&A" (res)
9882+ : "m" (*ptr)
9883+ );
9884+
9885+ return res;
9886+}
9887
9888 /**
9889 * atomic64_add_return - add and return
9890@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9891 * Other variants with different arithmetic operators:
9892 */
9893 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9894+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9895 extern u64 atomic64_inc_return(atomic64_t *ptr);
9896+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9897 extern u64 atomic64_dec_return(atomic64_t *ptr);
9898+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9899
9900 /**
9901 * atomic64_add - add integer to atomic64 variable
9902@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9903 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9904
9905 /**
9906+ * atomic64_add_unchecked - add integer to atomic64 variable
9907+ * @delta: integer value to add
9908+ * @ptr: pointer to type atomic64_unchecked_t
9909+ *
9910+ * Atomically adds @delta to @ptr.
9911+ */
9912+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9913+
9914+/**
9915 * atomic64_sub - subtract the atomic64 variable
9916 * @delta: integer value to subtract
9917 * @ptr: pointer to type atomic64_t
9918@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9919 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9920
9921 /**
9922+ * atomic64_sub_unchecked - subtract the atomic64 variable
9923+ * @delta: integer value to subtract
9924+ * @ptr: pointer to type atomic64_unchecked_t
9925+ *
9926+ * Atomically subtracts @delta from @ptr.
9927+ */
9928+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9929+
9930+/**
9931 * atomic64_sub_and_test - subtract value from variable and test result
9932 * @delta: integer value to subtract
9933 * @ptr: pointer to type atomic64_t
9934@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9935 extern void atomic64_inc(atomic64_t *ptr);
9936
9937 /**
9938+ * atomic64_inc_unchecked - increment atomic64 variable
9939+ * @ptr: pointer to type atomic64_unchecked_t
9940+ *
9941+ * Atomically increments @ptr by 1.
9942+ */
9943+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9944+
9945+/**
9946 * atomic64_dec - decrement atomic64 variable
9947 * @ptr: pointer to type atomic64_t
9948 *
9949@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9950 extern void atomic64_dec(atomic64_t *ptr);
9951
9952 /**
9953+ * atomic64_dec_unchecked - decrement atomic64 variable
9954+ * @ptr: pointer to type atomic64_unchecked_t
9955+ *
9956+ * Atomically decrements @ptr by 1.
9957+ */
9958+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9959+
9960+/**
9961 * atomic64_dec_and_test - decrement and test
9962 * @ptr: pointer to type atomic64_t
9963 *
9964diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9965index d605dc2..fafd7bd 100644
9966--- a/arch/x86/include/asm/atomic_64.h
9967+++ b/arch/x86/include/asm/atomic_64.h
9968@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9969 }
9970
9971 /**
9972+ * atomic_read_unchecked - read atomic variable
9973+ * @v: pointer of type atomic_unchecked_t
9974+ *
9975+ * Atomically reads the value of @v.
9976+ */
9977+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9978+{
9979+ return v->counter;
9980+}
9981+
9982+/**
9983 * atomic_set - set atomic variable
9984 * @v: pointer of type atomic_t
9985 * @i: required value
9986@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9987 }
9988
9989 /**
9990+ * atomic_set_unchecked - set atomic variable
9991+ * @v: pointer of type atomic_unchecked_t
9992+ * @i: required value
9993+ *
9994+ * Atomically sets the value of @v to @i.
9995+ */
9996+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9997+{
9998+ v->counter = i;
9999+}
10000+
10001+/**
10002 * atomic_add - add integer to atomic variable
10003 * @i: integer value to add
10004 * @v: pointer of type atomic_t
10005@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
10006 */
10007 static inline void atomic_add(int i, atomic_t *v)
10008 {
10009- asm volatile(LOCK_PREFIX "addl %1,%0"
10010+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10011+
10012+#ifdef CONFIG_PAX_REFCOUNT
10013+ "jno 0f\n"
10014+ LOCK_PREFIX "subl %1,%0\n"
10015+ "int $4\n0:\n"
10016+ _ASM_EXTABLE(0b, 0b)
10017+#endif
10018+
10019+ : "=m" (v->counter)
10020+ : "ir" (i), "m" (v->counter));
10021+}
10022+
10023+/**
10024+ * atomic_add_unchecked - add integer to atomic variable
10025+ * @i: integer value to add
10026+ * @v: pointer of type atomic_unchecked_t
10027+ *
10028+ * Atomically adds @i to @v.
10029+ */
10030+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10031+{
10032+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10033 : "=m" (v->counter)
10034 : "ir" (i), "m" (v->counter));
10035 }
10036@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
10037 */
10038 static inline void atomic_sub(int i, atomic_t *v)
10039 {
10040- asm volatile(LOCK_PREFIX "subl %1,%0"
10041+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10042+
10043+#ifdef CONFIG_PAX_REFCOUNT
10044+ "jno 0f\n"
10045+ LOCK_PREFIX "addl %1,%0\n"
10046+ "int $4\n0:\n"
10047+ _ASM_EXTABLE(0b, 0b)
10048+#endif
10049+
10050+ : "=m" (v->counter)
10051+ : "ir" (i), "m" (v->counter));
10052+}
10053+
10054+/**
10055+ * atomic_sub_unchecked - subtract the atomic variable
10056+ * @i: integer value to subtract
10057+ * @v: pointer of type atomic_unchecked_t
10058+ *
10059+ * Atomically subtracts @i from @v.
10060+ */
10061+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10062+{
10063+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10064 : "=m" (v->counter)
10065 : "ir" (i), "m" (v->counter));
10066 }
10067@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10068 {
10069 unsigned char c;
10070
10071- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10072+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10073+
10074+#ifdef CONFIG_PAX_REFCOUNT
10075+ "jno 0f\n"
10076+ LOCK_PREFIX "addl %2,%0\n"
10077+ "int $4\n0:\n"
10078+ _ASM_EXTABLE(0b, 0b)
10079+#endif
10080+
10081+ "sete %1\n"
10082 : "=m" (v->counter), "=qm" (c)
10083 : "ir" (i), "m" (v->counter) : "memory");
10084 return c;
10085@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10086 */
10087 static inline void atomic_inc(atomic_t *v)
10088 {
10089- asm volatile(LOCK_PREFIX "incl %0"
10090+ asm volatile(LOCK_PREFIX "incl %0\n"
10091+
10092+#ifdef CONFIG_PAX_REFCOUNT
10093+ "jno 0f\n"
10094+ LOCK_PREFIX "decl %0\n"
10095+ "int $4\n0:\n"
10096+ _ASM_EXTABLE(0b, 0b)
10097+#endif
10098+
10099+ : "=m" (v->counter)
10100+ : "m" (v->counter));
10101+}
10102+
10103+/**
10104+ * atomic_inc_unchecked - increment atomic variable
10105+ * @v: pointer of type atomic_unchecked_t
10106+ *
10107+ * Atomically increments @v by 1.
10108+ */
10109+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10110+{
10111+ asm volatile(LOCK_PREFIX "incl %0\n"
10112 : "=m" (v->counter)
10113 : "m" (v->counter));
10114 }
10115@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
10116 */
10117 static inline void atomic_dec(atomic_t *v)
10118 {
10119- asm volatile(LOCK_PREFIX "decl %0"
10120+ asm volatile(LOCK_PREFIX "decl %0\n"
10121+
10122+#ifdef CONFIG_PAX_REFCOUNT
10123+ "jno 0f\n"
10124+ LOCK_PREFIX "incl %0\n"
10125+ "int $4\n0:\n"
10126+ _ASM_EXTABLE(0b, 0b)
10127+#endif
10128+
10129+ : "=m" (v->counter)
10130+ : "m" (v->counter));
10131+}
10132+
10133+/**
10134+ * atomic_dec_unchecked - decrement atomic variable
10135+ * @v: pointer of type atomic_unchecked_t
10136+ *
10137+ * Atomically decrements @v by 1.
10138+ */
10139+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10140+{
10141+ asm volatile(LOCK_PREFIX "decl %0\n"
10142 : "=m" (v->counter)
10143 : "m" (v->counter));
10144 }
10145@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10146 {
10147 unsigned char c;
10148
10149- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10150+ asm volatile(LOCK_PREFIX "decl %0\n"
10151+
10152+#ifdef CONFIG_PAX_REFCOUNT
10153+ "jno 0f\n"
10154+ LOCK_PREFIX "incl %0\n"
10155+ "int $4\n0:\n"
10156+ _ASM_EXTABLE(0b, 0b)
10157+#endif
10158+
10159+ "sete %1\n"
10160 : "=m" (v->counter), "=qm" (c)
10161 : "m" (v->counter) : "memory");
10162 return c != 0;
10163@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10164 {
10165 unsigned char c;
10166
10167- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10168+ asm volatile(LOCK_PREFIX "incl %0\n"
10169+
10170+#ifdef CONFIG_PAX_REFCOUNT
10171+ "jno 0f\n"
10172+ LOCK_PREFIX "decl %0\n"
10173+ "int $4\n0:\n"
10174+ _ASM_EXTABLE(0b, 0b)
10175+#endif
10176+
10177+ "sete %1\n"
10178+ : "=m" (v->counter), "=qm" (c)
10179+ : "m" (v->counter) : "memory");
10180+ return c != 0;
10181+}
10182+
10183+/**
10184+ * atomic_inc_and_test_unchecked - increment and test
10185+ * @v: pointer of type atomic_unchecked_t
10186+ *
10187+ * Atomically increments @v by 1
10188+ * and returns true if the result is zero, or false for all
10189+ * other cases.
10190+ */
10191+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10192+{
10193+ unsigned char c;
10194+
10195+ asm volatile(LOCK_PREFIX "incl %0\n"
10196+ "sete %1\n"
10197 : "=m" (v->counter), "=qm" (c)
10198 : "m" (v->counter) : "memory");
10199 return c != 0;
10200@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10201 {
10202 unsigned char c;
10203
10204- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10205+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10206+
10207+#ifdef CONFIG_PAX_REFCOUNT
10208+ "jno 0f\n"
10209+ LOCK_PREFIX "subl %2,%0\n"
10210+ "int $4\n0:\n"
10211+ _ASM_EXTABLE(0b, 0b)
10212+#endif
10213+
10214+ "sets %1\n"
10215 : "=m" (v->counter), "=qm" (c)
10216 : "ir" (i), "m" (v->counter) : "memory");
10217 return c;
10218@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10219 static inline int atomic_add_return(int i, atomic_t *v)
10220 {
10221 int __i = i;
10222- asm volatile(LOCK_PREFIX "xaddl %0, %1"
10223+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10224+
10225+#ifdef CONFIG_PAX_REFCOUNT
10226+ "jno 0f\n"
10227+ "movl %0, %1\n"
10228+ "int $4\n0:\n"
10229+ _ASM_EXTABLE(0b, 0b)
10230+#endif
10231+
10232+ : "+r" (i), "+m" (v->counter)
10233+ : : "memory");
10234+ return i + __i;
10235+}
10236+
10237+/**
10238+ * atomic_add_return_unchecked - add and return
10239+ * @i: integer value to add
10240+ * @v: pointer of type atomic_unchecked_t
10241+ *
10242+ * Atomically adds @i to @v and returns @i + @v
10243+ */
10244+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10245+{
10246+ int __i = i;
10247+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10248 : "+r" (i), "+m" (v->counter)
10249 : : "memory");
10250 return i + __i;
10251@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10252 }
10253
10254 #define atomic_inc_return(v) (atomic_add_return(1, v))
10255+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10256+{
10257+ return atomic_add_return_unchecked(1, v);
10258+}
10259 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10260
10261 /* The 64-bit atomic type */
10262@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
10263 }
10264
10265 /**
10266+ * atomic64_read_unchecked - read atomic64 variable
10267+ * @v: pointer of type atomic64_unchecked_t
10268+ *
10269+ * Atomically reads the value of @v.
10270+ * Doesn't imply a read memory barrier.
10271+ */
10272+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10273+{
10274+ return v->counter;
10275+}
10276+
10277+/**
10278 * atomic64_set - set atomic64 variable
10279 * @v: pointer to type atomic64_t
10280 * @i: required value
10281@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10282 }
10283
10284 /**
10285+ * atomic64_set_unchecked - set atomic64 variable
10286+ * @v: pointer to type atomic64_unchecked_t
10287+ * @i: required value
10288+ *
10289+ * Atomically sets the value of @v to @i.
10290+ */
10291+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10292+{
10293+ v->counter = i;
10294+}
10295+
10296+/**
10297 * atomic64_add - add integer to atomic64 variable
10298 * @i: integer value to add
10299 * @v: pointer to type atomic64_t
10300@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10301 */
10302 static inline void atomic64_add(long i, atomic64_t *v)
10303 {
10304+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
10305+
10306+#ifdef CONFIG_PAX_REFCOUNT
10307+ "jno 0f\n"
10308+ LOCK_PREFIX "subq %1,%0\n"
10309+ "int $4\n0:\n"
10310+ _ASM_EXTABLE(0b, 0b)
10311+#endif
10312+
10313+ : "=m" (v->counter)
10314+ : "er" (i), "m" (v->counter));
10315+}
10316+
10317+/**
10318+ * atomic64_add_unchecked - add integer to atomic64 variable
10319+ * @i: integer value to add
10320+ * @v: pointer to type atomic64_unchecked_t
10321+ *
10322+ * Atomically adds @i to @v.
10323+ */
10324+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10325+{
10326 asm volatile(LOCK_PREFIX "addq %1,%0"
10327 : "=m" (v->counter)
10328 : "er" (i), "m" (v->counter));
10329@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
10330 */
10331 static inline void atomic64_sub(long i, atomic64_t *v)
10332 {
10333- asm volatile(LOCK_PREFIX "subq %1,%0"
10334+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10335+
10336+#ifdef CONFIG_PAX_REFCOUNT
10337+ "jno 0f\n"
10338+ LOCK_PREFIX "addq %1,%0\n"
10339+ "int $4\n0:\n"
10340+ _ASM_EXTABLE(0b, 0b)
10341+#endif
10342+
10343 : "=m" (v->counter)
10344 : "er" (i), "m" (v->counter));
10345 }
10346@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10347 {
10348 unsigned char c;
10349
10350- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10351+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
10352+
10353+#ifdef CONFIG_PAX_REFCOUNT
10354+ "jno 0f\n"
10355+ LOCK_PREFIX "addq %2,%0\n"
10356+ "int $4\n0:\n"
10357+ _ASM_EXTABLE(0b, 0b)
10358+#endif
10359+
10360+ "sete %1\n"
10361 : "=m" (v->counter), "=qm" (c)
10362 : "er" (i), "m" (v->counter) : "memory");
10363 return c;
10364@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10365 */
10366 static inline void atomic64_inc(atomic64_t *v)
10367 {
10368+ asm volatile(LOCK_PREFIX "incq %0\n"
10369+
10370+#ifdef CONFIG_PAX_REFCOUNT
10371+ "jno 0f\n"
10372+ LOCK_PREFIX "decq %0\n"
10373+ "int $4\n0:\n"
10374+ _ASM_EXTABLE(0b, 0b)
10375+#endif
10376+
10377+ : "=m" (v->counter)
10378+ : "m" (v->counter));
10379+}
10380+
10381+/**
10382+ * atomic64_inc_unchecked - increment atomic64 variable
10383+ * @v: pointer to type atomic64_unchecked_t
10384+ *
10385+ * Atomically increments @v by 1.
10386+ */
10387+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10388+{
10389 asm volatile(LOCK_PREFIX "incq %0"
10390 : "=m" (v->counter)
10391 : "m" (v->counter));
10392@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
10393 */
10394 static inline void atomic64_dec(atomic64_t *v)
10395 {
10396- asm volatile(LOCK_PREFIX "decq %0"
10397+ asm volatile(LOCK_PREFIX "decq %0\n"
10398+
10399+#ifdef CONFIG_PAX_REFCOUNT
10400+ "jno 0f\n"
10401+ LOCK_PREFIX "incq %0\n"
10402+ "int $4\n0:\n"
10403+ _ASM_EXTABLE(0b, 0b)
10404+#endif
10405+
10406+ : "=m" (v->counter)
10407+ : "m" (v->counter));
10408+}
10409+
10410+/**
10411+ * atomic64_dec_unchecked - decrement atomic64 variable
10412+ * @v: pointer to type atomic64_t
10413+ *
10414+ * Atomically decrements @v by 1.
10415+ */
10416+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10417+{
10418+ asm volatile(LOCK_PREFIX "decq %0\n"
10419 : "=m" (v->counter)
10420 : "m" (v->counter));
10421 }
10422@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
10423 {
10424 unsigned char c;
10425
10426- asm volatile(LOCK_PREFIX "decq %0; sete %1"
10427+ asm volatile(LOCK_PREFIX "decq %0\n"
10428+
10429+#ifdef CONFIG_PAX_REFCOUNT
10430+ "jno 0f\n"
10431+ LOCK_PREFIX "incq %0\n"
10432+ "int $4\n0:\n"
10433+ _ASM_EXTABLE(0b, 0b)
10434+#endif
10435+
10436+ "sete %1\n"
10437 : "=m" (v->counter), "=qm" (c)
10438 : "m" (v->counter) : "memory");
10439 return c != 0;
10440@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10441 {
10442 unsigned char c;
10443
10444- asm volatile(LOCK_PREFIX "incq %0; sete %1"
10445+ asm volatile(LOCK_PREFIX "incq %0\n"
10446+
10447+#ifdef CONFIG_PAX_REFCOUNT
10448+ "jno 0f\n"
10449+ LOCK_PREFIX "decq %0\n"
10450+ "int $4\n0:\n"
10451+ _ASM_EXTABLE(0b, 0b)
10452+#endif
10453+
10454+ "sete %1\n"
10455 : "=m" (v->counter), "=qm" (c)
10456 : "m" (v->counter) : "memory");
10457 return c != 0;
10458@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10459 {
10460 unsigned char c;
10461
10462- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10463+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
10464+
10465+#ifdef CONFIG_PAX_REFCOUNT
10466+ "jno 0f\n"
10467+ LOCK_PREFIX "subq %2,%0\n"
10468+ "int $4\n0:\n"
10469+ _ASM_EXTABLE(0b, 0b)
10470+#endif
10471+
10472+ "sets %1\n"
10473 : "=m" (v->counter), "=qm" (c)
10474 : "er" (i), "m" (v->counter) : "memory");
10475 return c;
10476@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10477 static inline long atomic64_add_return(long i, atomic64_t *v)
10478 {
10479 long __i = i;
10480- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
10481+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
10482+
10483+#ifdef CONFIG_PAX_REFCOUNT
10484+ "jno 0f\n"
10485+ "movq %0, %1\n"
10486+ "int $4\n0:\n"
10487+ _ASM_EXTABLE(0b, 0b)
10488+#endif
10489+
10490+ : "+r" (i), "+m" (v->counter)
10491+ : : "memory");
10492+ return i + __i;
10493+}
10494+
10495+/**
10496+ * atomic64_add_return_unchecked - add and return
10497+ * @i: integer value to add
10498+ * @v: pointer to type atomic64_unchecked_t
10499+ *
10500+ * Atomically adds @i to @v and returns @i + @v
10501+ */
10502+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10503+{
10504+ long __i = i;
10505+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
10506 : "+r" (i), "+m" (v->counter)
10507 : : "memory");
10508 return i + __i;
10509@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10510 }
10511
10512 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10513+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10514+{
10515+ return atomic64_add_return_unchecked(1, v);
10516+}
10517 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10518
10519 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10520@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10521 return cmpxchg(&v->counter, old, new);
10522 }
10523
10524+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10525+{
10526+ return cmpxchg(&v->counter, old, new);
10527+}
10528+
10529 static inline long atomic64_xchg(atomic64_t *v, long new)
10530 {
10531 return xchg(&v->counter, new);
10532 }
10533
10534+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10535+{
10536+ return xchg(&v->counter, new);
10537+}
10538+
10539 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
10540 {
10541 return cmpxchg(&v->counter, old, new);
10542 }
10543
10544+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10545+{
10546+ return cmpxchg(&v->counter, old, new);
10547+}
10548+
10549 static inline long atomic_xchg(atomic_t *v, int new)
10550 {
10551 return xchg(&v->counter, new);
10552 }
10553
10554+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10555+{
10556+ return xchg(&v->counter, new);
10557+}
10558+
10559 /**
10560 * atomic_add_unless - add unless the number is a given value
10561 * @v: pointer of type atomic_t
10562@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
10563 */
10564 static inline int atomic_add_unless(atomic_t *v, int a, int u)
10565 {
10566- int c, old;
10567+ int c, old, new;
10568 c = atomic_read(v);
10569 for (;;) {
10570- if (unlikely(c == (u)))
10571+ if (unlikely(c == u))
10572 break;
10573- old = atomic_cmpxchg((v), c, c + (a));
10574+
10575+ asm volatile("addl %2,%0\n"
10576+
10577+#ifdef CONFIG_PAX_REFCOUNT
10578+ "jno 0f\n"
10579+ "subl %2,%0\n"
10580+ "int $4\n0:\n"
10581+ _ASM_EXTABLE(0b, 0b)
10582+#endif
10583+
10584+ : "=r" (new)
10585+ : "0" (c), "ir" (a));
10586+
10587+ old = atomic_cmpxchg(v, c, new);
10588 if (likely(old == c))
10589 break;
10590 c = old;
10591 }
10592- return c != (u);
10593+ return c != u;
10594 }
10595
10596 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
10597@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10598 */
10599 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10600 {
10601- long c, old;
10602+ long c, old, new;
10603 c = atomic64_read(v);
10604 for (;;) {
10605- if (unlikely(c == (u)))
10606+ if (unlikely(c == u))
10607 break;
10608- old = atomic64_cmpxchg((v), c, c + (a));
10609+
10610+ asm volatile("addq %2,%0\n"
10611+
10612+#ifdef CONFIG_PAX_REFCOUNT
10613+ "jno 0f\n"
10614+ "subq %2,%0\n"
10615+ "int $4\n0:\n"
10616+ _ASM_EXTABLE(0b, 0b)
10617+#endif
10618+
10619+ : "=r" (new)
10620+ : "0" (c), "er" (a));
10621+
10622+ old = atomic64_cmpxchg(v, c, new);
10623 if (likely(old == c))
10624 break;
10625 c = old;
10626 }
10627- return c != (u);
10628+ return c != u;
10629 }
10630
10631 /**
10632diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10633index 02b47a6..d5c4b15 100644
10634--- a/arch/x86/include/asm/bitops.h
10635+++ b/arch/x86/include/asm/bitops.h
10636@@ -38,7 +38,7 @@
10637 * a mask operation on a byte.
10638 */
10639 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10640-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10641+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10642 #define CONST_MASK(nr) (1 << ((nr) & 7))
10643
10644 /**
10645diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10646index 7a10659..8bbf355 100644
10647--- a/arch/x86/include/asm/boot.h
10648+++ b/arch/x86/include/asm/boot.h
10649@@ -11,10 +11,15 @@
10650 #include <asm/pgtable_types.h>
10651
10652 /* Physical address where kernel should be loaded. */
10653-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10654+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10655 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10656 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10657
10658+#ifndef __ASSEMBLY__
10659+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10660+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10661+#endif
10662+
10663 /* Minimum kernel alignment, as a power of two */
10664 #ifdef CONFIG_X86_64
10665 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10666diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10667index 549860d..7d45f68 100644
10668--- a/arch/x86/include/asm/cache.h
10669+++ b/arch/x86/include/asm/cache.h
10670@@ -5,9 +5,10 @@
10671
10672 /* L1 cache line size */
10673 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10674-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10675+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10676
10677 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
10678+#define __read_only __attribute__((__section__(".data.read_only")))
10679
10680 #ifdef CONFIG_X86_VSMP
10681 /* vSMP Internode cacheline shift */
10682diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10683index b54f6af..5b376a6 100644
10684--- a/arch/x86/include/asm/cacheflush.h
10685+++ b/arch/x86/include/asm/cacheflush.h
10686@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
10687 static inline unsigned long get_page_memtype(struct page *pg)
10688 {
10689 if (!PageUncached(pg) && !PageWC(pg))
10690- return -1;
10691+ return ~0UL;
10692 else if (!PageUncached(pg) && PageWC(pg))
10693 return _PAGE_CACHE_WC;
10694 else if (PageUncached(pg) && !PageWC(pg))
10695@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
10696 SetPageWC(pg);
10697 break;
10698 default:
10699- case -1:
10700+ case ~0UL:
10701 ClearPageUncached(pg);
10702 ClearPageWC(pg);
10703 break;
10704diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
10705index 0e63c9a..ab8d972 100644
10706--- a/arch/x86/include/asm/calling.h
10707+++ b/arch/x86/include/asm/calling.h
10708@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
10709 * for assembly code:
10710 */
10711
10712-#define R15 0
10713-#define R14 8
10714-#define R13 16
10715-#define R12 24
10716-#define RBP 32
10717-#define RBX 40
10718+#define R15 (0)
10719+#define R14 (8)
10720+#define R13 (16)
10721+#define R12 (24)
10722+#define RBP (32)
10723+#define RBX (40)
10724
10725 /* arguments: interrupts/non tracing syscalls only save up to here: */
10726-#define R11 48
10727-#define R10 56
10728-#define R9 64
10729-#define R8 72
10730-#define RAX 80
10731-#define RCX 88
10732-#define RDX 96
10733-#define RSI 104
10734-#define RDI 112
10735-#define ORIG_RAX 120 /* + error_code */
10736+#define R11 (48)
10737+#define R10 (56)
10738+#define R9 (64)
10739+#define R8 (72)
10740+#define RAX (80)
10741+#define RCX (88)
10742+#define RDX (96)
10743+#define RSI (104)
10744+#define RDI (112)
10745+#define ORIG_RAX (120) /* + error_code */
10746 /* end of arguments */
10747
10748 /* cpu exception frame or undefined in case of fast syscall: */
10749-#define RIP 128
10750-#define CS 136
10751-#define EFLAGS 144
10752-#define RSP 152
10753-#define SS 160
10754+#define RIP (128)
10755+#define CS (136)
10756+#define EFLAGS (144)
10757+#define RSP (152)
10758+#define SS (160)
10759
10760 #define ARGOFFSET R11
10761 #define SWFRAME ORIG_RAX
10762diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10763index 46fc474..b02b0f9 100644
10764--- a/arch/x86/include/asm/checksum_32.h
10765+++ b/arch/x86/include/asm/checksum_32.h
10766@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10767 int len, __wsum sum,
10768 int *src_err_ptr, int *dst_err_ptr);
10769
10770+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10771+ int len, __wsum sum,
10772+ int *src_err_ptr, int *dst_err_ptr);
10773+
10774+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10775+ int len, __wsum sum,
10776+ int *src_err_ptr, int *dst_err_ptr);
10777+
10778 /*
10779 * Note: when you get a NULL pointer exception here this means someone
10780 * passed in an incorrect kernel address to one of these functions.
10781@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10782 int *err_ptr)
10783 {
10784 might_sleep();
10785- return csum_partial_copy_generic((__force void *)src, dst,
10786+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10787 len, sum, err_ptr, NULL);
10788 }
10789
10790@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10791 {
10792 might_sleep();
10793 if (access_ok(VERIFY_WRITE, dst, len))
10794- return csum_partial_copy_generic(src, (__force void *)dst,
10795+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10796 len, sum, NULL, err_ptr);
10797
10798 if (len)
10799diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10800index 617bd56..7b047a1 100644
10801--- a/arch/x86/include/asm/desc.h
10802+++ b/arch/x86/include/asm/desc.h
10803@@ -4,6 +4,7 @@
10804 #include <asm/desc_defs.h>
10805 #include <asm/ldt.h>
10806 #include <asm/mmu.h>
10807+#include <asm/pgtable.h>
10808 #include <linux/smp.h>
10809
10810 static inline void fill_ldt(struct desc_struct *desc,
10811@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
10812 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10813 desc->type = (info->read_exec_only ^ 1) << 1;
10814 desc->type |= info->contents << 2;
10815+ desc->type |= info->seg_not_present ^ 1;
10816 desc->s = 1;
10817 desc->dpl = 0x3;
10818 desc->p = info->seg_not_present ^ 1;
10819@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
10820 }
10821
10822 extern struct desc_ptr idt_descr;
10823-extern gate_desc idt_table[];
10824-
10825-struct gdt_page {
10826- struct desc_struct gdt[GDT_ENTRIES];
10827-} __attribute__((aligned(PAGE_SIZE)));
10828-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10829+extern gate_desc idt_table[256];
10830
10831+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10832 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10833 {
10834- return per_cpu(gdt_page, cpu).gdt;
10835+ return cpu_gdt_table[cpu];
10836 }
10837
10838 #ifdef CONFIG_X86_64
10839@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10840 unsigned long base, unsigned dpl, unsigned flags,
10841 unsigned short seg)
10842 {
10843- gate->a = (seg << 16) | (base & 0xffff);
10844- gate->b = (base & 0xffff0000) |
10845- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10846+ gate->gate.offset_low = base;
10847+ gate->gate.seg = seg;
10848+ gate->gate.reserved = 0;
10849+ gate->gate.type = type;
10850+ gate->gate.s = 0;
10851+ gate->gate.dpl = dpl;
10852+ gate->gate.p = 1;
10853+ gate->gate.offset_high = base >> 16;
10854 }
10855
10856 #endif
10857@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10858 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10859 const gate_desc *gate)
10860 {
10861+ pax_open_kernel();
10862 memcpy(&idt[entry], gate, sizeof(*gate));
10863+ pax_close_kernel();
10864 }
10865
10866 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10867 const void *desc)
10868 {
10869+ pax_open_kernel();
10870 memcpy(&ldt[entry], desc, 8);
10871+ pax_close_kernel();
10872 }
10873
10874 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10875@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10876 size = sizeof(struct desc_struct);
10877 break;
10878 }
10879+
10880+ pax_open_kernel();
10881 memcpy(&gdt[entry], desc, size);
10882+ pax_close_kernel();
10883 }
10884
10885 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10886@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10887
10888 static inline void native_load_tr_desc(void)
10889 {
10890+ pax_open_kernel();
10891 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10892+ pax_close_kernel();
10893 }
10894
10895 static inline void native_load_gdt(const struct desc_ptr *dtr)
10896@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10897 unsigned int i;
10898 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10899
10900+ pax_open_kernel();
10901 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10902 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10903+ pax_close_kernel();
10904 }
10905
10906 #define _LDT_empty(info) \
10907@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10908 desc->limit = (limit >> 16) & 0xf;
10909 }
10910
10911-static inline void _set_gate(int gate, unsigned type, void *addr,
10912+static inline void _set_gate(int gate, unsigned type, const void *addr,
10913 unsigned dpl, unsigned ist, unsigned seg)
10914 {
10915 gate_desc s;
10916@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10917 * Pentium F0 0F bugfix can have resulted in the mapped
10918 * IDT being write-protected.
10919 */
10920-static inline void set_intr_gate(unsigned int n, void *addr)
10921+static inline void set_intr_gate(unsigned int n, const void *addr)
10922 {
10923 BUG_ON((unsigned)n > 0xFF);
10924 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10925@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10926 /*
10927 * This routine sets up an interrupt gate at directory privilege level 3.
10928 */
10929-static inline void set_system_intr_gate(unsigned int n, void *addr)
10930+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10931 {
10932 BUG_ON((unsigned)n > 0xFF);
10933 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10934 }
10935
10936-static inline void set_system_trap_gate(unsigned int n, void *addr)
10937+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10938 {
10939 BUG_ON((unsigned)n > 0xFF);
10940 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10941 }
10942
10943-static inline void set_trap_gate(unsigned int n, void *addr)
10944+static inline void set_trap_gate(unsigned int n, const void *addr)
10945 {
10946 BUG_ON((unsigned)n > 0xFF);
10947 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10948@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10949 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10950 {
10951 BUG_ON((unsigned)n > 0xFF);
10952- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10953+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10954 }
10955
10956-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10957+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10958 {
10959 BUG_ON((unsigned)n > 0xFF);
10960 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10961 }
10962
10963-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10964+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10965 {
10966 BUG_ON((unsigned)n > 0xFF);
10967 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10968 }
10969
10970+#ifdef CONFIG_X86_32
10971+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10972+{
10973+ struct desc_struct d;
10974+
10975+ if (likely(limit))
10976+ limit = (limit - 1UL) >> PAGE_SHIFT;
10977+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10978+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10979+}
10980+#endif
10981+
10982 #endif /* _ASM_X86_DESC_H */
10983diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10984index 9d66848..6b4a691 100644
10985--- a/arch/x86/include/asm/desc_defs.h
10986+++ b/arch/x86/include/asm/desc_defs.h
10987@@ -31,6 +31,12 @@ struct desc_struct {
10988 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10989 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10990 };
10991+ struct {
10992+ u16 offset_low;
10993+ u16 seg;
10994+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10995+ unsigned offset_high: 16;
10996+ } gate;
10997 };
10998 } __attribute__((packed));
10999
11000diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
11001index cee34e9..a7c3fa2 100644
11002--- a/arch/x86/include/asm/device.h
11003+++ b/arch/x86/include/asm/device.h
11004@@ -6,7 +6,7 @@ struct dev_archdata {
11005 void *acpi_handle;
11006 #endif
11007 #ifdef CONFIG_X86_64
11008-struct dma_map_ops *dma_ops;
11009+ const struct dma_map_ops *dma_ops;
11010 #endif
11011 #ifdef CONFIG_DMAR
11012 void *iommu; /* hook for IOMMU specific extension */
11013diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
11014index 6a25d5d..786b202 100644
11015--- a/arch/x86/include/asm/dma-mapping.h
11016+++ b/arch/x86/include/asm/dma-mapping.h
11017@@ -25,9 +25,9 @@ extern int iommu_merge;
11018 extern struct device x86_dma_fallback_dev;
11019 extern int panic_on_overflow;
11020
11021-extern struct dma_map_ops *dma_ops;
11022+extern const struct dma_map_ops *dma_ops;
11023
11024-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11025+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
11026 {
11027 #ifdef CONFIG_X86_32
11028 return dma_ops;
11029@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11030 /* Make sure we keep the same behaviour */
11031 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
11032 {
11033- struct dma_map_ops *ops = get_dma_ops(dev);
11034+ const struct dma_map_ops *ops = get_dma_ops(dev);
11035 if (ops->mapping_error)
11036 return ops->mapping_error(dev, dma_addr);
11037
11038@@ -122,7 +122,7 @@ static inline void *
11039 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11040 gfp_t gfp)
11041 {
11042- struct dma_map_ops *ops = get_dma_ops(dev);
11043+ const struct dma_map_ops *ops = get_dma_ops(dev);
11044 void *memory;
11045
11046 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
11047@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11048 static inline void dma_free_coherent(struct device *dev, size_t size,
11049 void *vaddr, dma_addr_t bus)
11050 {
11051- struct dma_map_ops *ops = get_dma_ops(dev);
11052+ const struct dma_map_ops *ops = get_dma_ops(dev);
11053
11054 WARN_ON(irqs_disabled()); /* for portability */
11055
11056diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11057index 40b4e61..40d8133 100644
11058--- a/arch/x86/include/asm/e820.h
11059+++ b/arch/x86/include/asm/e820.h
11060@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
11061 #define ISA_END_ADDRESS 0x100000
11062 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
11063
11064-#define BIOS_BEGIN 0x000a0000
11065+#define BIOS_BEGIN 0x000c0000
11066 #define BIOS_END 0x00100000
11067
11068 #ifdef __KERNEL__
11069diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11070index 8ac9d9a..0a6c96e 100644
11071--- a/arch/x86/include/asm/elf.h
11072+++ b/arch/x86/include/asm/elf.h
11073@@ -257,7 +257,25 @@ extern int force_personality32;
11074 the loader. We need to make sure that it is out of the way of the program
11075 that it will "exec", and that there is sufficient room for the brk. */
11076
11077+#ifdef CONFIG_PAX_SEGMEXEC
11078+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11079+#else
11080 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11081+#endif
11082+
11083+#ifdef CONFIG_PAX_ASLR
11084+#ifdef CONFIG_X86_32
11085+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11086+
11087+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11088+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11089+#else
11090+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11091+
11092+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11093+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11094+#endif
11095+#endif
11096
11097 /* This yields a mask that user programs can use to figure out what
11098 instruction set this CPU supports. This could be done in user space,
11099@@ -310,9 +328,7 @@ do { \
11100
11101 #define ARCH_DLINFO \
11102 do { \
11103- if (vdso_enabled) \
11104- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11105- (unsigned long)current->mm->context.vdso); \
11106+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11107 } while (0)
11108
11109 #define AT_SYSINFO 32
11110@@ -323,7 +339,7 @@ do { \
11111
11112 #endif /* !CONFIG_X86_32 */
11113
11114-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11115+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11116
11117 #define VDSO_ENTRY \
11118 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11119@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
11120 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11121 #define compat_arch_setup_additional_pages syscall32_setup_pages
11122
11123-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11124-#define arch_randomize_brk arch_randomize_brk
11125-
11126 #endif /* _ASM_X86_ELF_H */
11127diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11128index cc70c1c..d96d011 100644
11129--- a/arch/x86/include/asm/emergency-restart.h
11130+++ b/arch/x86/include/asm/emergency-restart.h
11131@@ -15,6 +15,6 @@ enum reboot_type {
11132
11133 extern enum reboot_type reboot_type;
11134
11135-extern void machine_emergency_restart(void);
11136+extern void machine_emergency_restart(void) __noreturn;
11137
11138 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11139diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
11140index dbe82a5..c6d8a00 100644
11141--- a/arch/x86/include/asm/floppy.h
11142+++ b/arch/x86/include/asm/floppy.h
11143@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
11144 }
11145
11146
11147+static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
11148 static unsigned long vdma_mem_alloc(unsigned long size)
11149 {
11150 return (unsigned long)vmalloc(size);
11151diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11152index 1f11ce4..7caabd1 100644
11153--- a/arch/x86/include/asm/futex.h
11154+++ b/arch/x86/include/asm/futex.h
11155@@ -12,16 +12,18 @@
11156 #include <asm/system.h>
11157
11158 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11159+ typecheck(u32 __user *, uaddr); \
11160 asm volatile("1:\t" insn "\n" \
11161 "2:\t.section .fixup,\"ax\"\n" \
11162 "3:\tmov\t%3, %1\n" \
11163 "\tjmp\t2b\n" \
11164 "\t.previous\n" \
11165 _ASM_EXTABLE(1b, 3b) \
11166- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11167+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
11168 : "i" (-EFAULT), "0" (oparg), "1" (0))
11169
11170 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11171+ typecheck(u32 __user *, uaddr); \
11172 asm volatile("1:\tmovl %2, %0\n" \
11173 "\tmovl\t%0, %3\n" \
11174 "\t" insn "\n" \
11175@@ -34,10 +36,10 @@
11176 _ASM_EXTABLE(1b, 4b) \
11177 _ASM_EXTABLE(2b, 4b) \
11178 : "=&a" (oldval), "=&r" (ret), \
11179- "+m" (*uaddr), "=&r" (tem) \
11180+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11181 : "r" (oparg), "i" (-EFAULT), "1" (0))
11182
11183-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11184+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11185 {
11186 int op = (encoded_op >> 28) & 7;
11187 int cmp = (encoded_op >> 24) & 15;
11188@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11189
11190 switch (op) {
11191 case FUTEX_OP_SET:
11192- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11193+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11194 break;
11195 case FUTEX_OP_ADD:
11196- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11197+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11198 uaddr, oparg);
11199 break;
11200 case FUTEX_OP_OR:
11201@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11202 return ret;
11203 }
11204
11205-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11206+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
11207 int newval)
11208 {
11209
11210@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11211 return -ENOSYS;
11212 #endif
11213
11214- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
11215+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
11216 return -EFAULT;
11217
11218- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
11219+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
11220 "2:\t.section .fixup, \"ax\"\n"
11221 "3:\tmov %2, %0\n"
11222 "\tjmp 2b\n"
11223 "\t.previous\n"
11224 _ASM_EXTABLE(1b, 3b)
11225- : "=a" (oldval), "+m" (*uaddr)
11226+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
11227 : "i" (-EFAULT), "r" (newval), "0" (oldval)
11228 : "memory"
11229 );
11230diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11231index ba180d9..3bad351 100644
11232--- a/arch/x86/include/asm/hw_irq.h
11233+++ b/arch/x86/include/asm/hw_irq.h
11234@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
11235 extern void enable_IO_APIC(void);
11236
11237 /* Statistics */
11238-extern atomic_t irq_err_count;
11239-extern atomic_t irq_mis_count;
11240+extern atomic_unchecked_t irq_err_count;
11241+extern atomic_unchecked_t irq_mis_count;
11242
11243 /* EISA */
11244 extern void eisa_set_level_irq(unsigned int irq);
11245diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
11246index 0b20bbb..4cb1396 100644
11247--- a/arch/x86/include/asm/i387.h
11248+++ b/arch/x86/include/asm/i387.h
11249@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11250 {
11251 int err;
11252
11253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11255+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
11256+#endif
11257+
11258 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
11259 "2:\n"
11260 ".section .fixup,\"ax\"\n"
11261@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
11262 {
11263 int err;
11264
11265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11266+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11267+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
11268+#endif
11269+
11270 asm volatile("1: rex64/fxsave (%[fx])\n\t"
11271 "2:\n"
11272 ".section .fixup,\"ax\"\n"
11273@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11274 }
11275
11276 /* We need a safe address that is cheap to find and that is already
11277- in L1 during context switch. The best choices are unfortunately
11278- different for UP and SMP */
11279-#ifdef CONFIG_SMP
11280-#define safe_address (__per_cpu_offset[0])
11281-#else
11282-#define safe_address (kstat_cpu(0).cpustat.user)
11283-#endif
11284+ in L1 during context switch. */
11285+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
11286
11287 /*
11288 * These must be called with preempt disabled
11289@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
11290 struct thread_info *me = current_thread_info();
11291 preempt_disable();
11292 if (me->status & TS_USEDFPU)
11293- __save_init_fpu(me->task);
11294+ __save_init_fpu(current);
11295 else
11296 clts();
11297 }
11298diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
11299index a299900..15c5410 100644
11300--- a/arch/x86/include/asm/io_32.h
11301+++ b/arch/x86/include/asm/io_32.h
11302@@ -3,6 +3,7 @@
11303
11304 #include <linux/string.h>
11305 #include <linux/compiler.h>
11306+#include <asm/processor.h>
11307
11308 /*
11309 * This file contains the definitions for the x86 IO instructions
11310@@ -42,6 +43,17 @@
11311
11312 #ifdef __KERNEL__
11313
11314+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11315+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11316+{
11317+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11318+}
11319+
11320+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11321+{
11322+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11323+}
11324+
11325 #include <asm-generic/iomap.h>
11326
11327 #include <linux/vmalloc.h>
11328diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
11329index 2440678..c158b88 100644
11330--- a/arch/x86/include/asm/io_64.h
11331+++ b/arch/x86/include/asm/io_64.h
11332@@ -140,6 +140,17 @@ __OUTS(l)
11333
11334 #include <linux/vmalloc.h>
11335
11336+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11337+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11338+{
11339+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11340+}
11341+
11342+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11343+{
11344+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11345+}
11346+
11347 #include <asm-generic/iomap.h>
11348
11349 void __memcpy_fromio(void *, unsigned long, unsigned);
11350diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
11351index fd6d21b..8b13915 100644
11352--- a/arch/x86/include/asm/iommu.h
11353+++ b/arch/x86/include/asm/iommu.h
11354@@ -3,7 +3,7 @@
11355
11356 extern void pci_iommu_shutdown(void);
11357 extern void no_iommu_init(void);
11358-extern struct dma_map_ops nommu_dma_ops;
11359+extern const struct dma_map_ops nommu_dma_ops;
11360 extern int force_iommu, no_iommu;
11361 extern int iommu_detected;
11362 extern int iommu_pass_through;
11363diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11364index 9e2b952..557206e 100644
11365--- a/arch/x86/include/asm/irqflags.h
11366+++ b/arch/x86/include/asm/irqflags.h
11367@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
11368 sti; \
11369 sysexit
11370
11371+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11372+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11373+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11374+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11375+
11376 #else
11377 #define INTERRUPT_RETURN iret
11378 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11379diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11380index 4fe681d..bb6d40c 100644
11381--- a/arch/x86/include/asm/kprobes.h
11382+++ b/arch/x86/include/asm/kprobes.h
11383@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
11384 #define BREAKPOINT_INSTRUCTION 0xcc
11385 #define RELATIVEJUMP_INSTRUCTION 0xe9
11386 #define MAX_INSN_SIZE 16
11387-#define MAX_STACK_SIZE 64
11388-#define MIN_STACK_SIZE(ADDR) \
11389- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11390- THREAD_SIZE - (unsigned long)(ADDR))) \
11391- ? (MAX_STACK_SIZE) \
11392- : (((unsigned long)current_thread_info()) + \
11393- THREAD_SIZE - (unsigned long)(ADDR)))
11394+#define MAX_STACK_SIZE 64UL
11395+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11396
11397 #define flush_insn_slot(p) do { } while (0)
11398
11399diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11400index 08bc2ff..acafd8f 100644
11401--- a/arch/x86/include/asm/kvm_host.h
11402+++ b/arch/x86/include/asm/kvm_host.h
11403@@ -534,9 +534,9 @@ struct kvm_x86_ops {
11404 bool (*gb_page_enable)(void);
11405
11406 const struct trace_print_flags *exit_reasons_str;
11407-};
11408+} __do_const;
11409
11410-extern struct kvm_x86_ops *kvm_x86_ops;
11411+extern const struct kvm_x86_ops *kvm_x86_ops;
11412
11413 int kvm_mmu_module_init(void);
11414 void kvm_mmu_module_exit(void);
11415@@ -558,9 +558,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
11416 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
11417
11418 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
11419- const void *val, int bytes);
11420+ const void *val, int bytes) __size_overflow(2);
11421 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
11422- gpa_t addr, unsigned long *ret);
11423+ gpa_t addr, unsigned long *ret) __size_overflow(2,3);
11424 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
11425
11426 extern bool tdp_enabled;
11427@@ -619,7 +619,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
11428 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
11429
11430 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
11431-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
11432+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) __size_overflow(3);
11433
11434 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
11435 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
11436@@ -643,7 +643,7 @@ unsigned long segment_base(u16 selector);
11437 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
11438 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
11439 const u8 *new, int bytes,
11440- bool guest_initiated);
11441+ bool guest_initiated) __size_overflow(2);
11442 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
11443 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
11444 int kvm_mmu_load(struct kvm_vcpu *vcpu);
11445diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11446index 47b9b6f..815aaa1 100644
11447--- a/arch/x86/include/asm/local.h
11448+++ b/arch/x86/include/asm/local.h
11449@@ -18,26 +18,58 @@ typedef struct {
11450
11451 static inline void local_inc(local_t *l)
11452 {
11453- asm volatile(_ASM_INC "%0"
11454+ asm volatile(_ASM_INC "%0\n"
11455+
11456+#ifdef CONFIG_PAX_REFCOUNT
11457+ "jno 0f\n"
11458+ _ASM_DEC "%0\n"
11459+ "int $4\n0:\n"
11460+ _ASM_EXTABLE(0b, 0b)
11461+#endif
11462+
11463 : "+m" (l->a.counter));
11464 }
11465
11466 static inline void local_dec(local_t *l)
11467 {
11468- asm volatile(_ASM_DEC "%0"
11469+ asm volatile(_ASM_DEC "%0\n"
11470+
11471+#ifdef CONFIG_PAX_REFCOUNT
11472+ "jno 0f\n"
11473+ _ASM_INC "%0\n"
11474+ "int $4\n0:\n"
11475+ _ASM_EXTABLE(0b, 0b)
11476+#endif
11477+
11478 : "+m" (l->a.counter));
11479 }
11480
11481 static inline void local_add(long i, local_t *l)
11482 {
11483- asm volatile(_ASM_ADD "%1,%0"
11484+ asm volatile(_ASM_ADD "%1,%0\n"
11485+
11486+#ifdef CONFIG_PAX_REFCOUNT
11487+ "jno 0f\n"
11488+ _ASM_SUB "%1,%0\n"
11489+ "int $4\n0:\n"
11490+ _ASM_EXTABLE(0b, 0b)
11491+#endif
11492+
11493 : "+m" (l->a.counter)
11494 : "ir" (i));
11495 }
11496
11497 static inline void local_sub(long i, local_t *l)
11498 {
11499- asm volatile(_ASM_SUB "%1,%0"
11500+ asm volatile(_ASM_SUB "%1,%0\n"
11501+
11502+#ifdef CONFIG_PAX_REFCOUNT
11503+ "jno 0f\n"
11504+ _ASM_ADD "%1,%0\n"
11505+ "int $4\n0:\n"
11506+ _ASM_EXTABLE(0b, 0b)
11507+#endif
11508+
11509 : "+m" (l->a.counter)
11510 : "ir" (i));
11511 }
11512@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11513 {
11514 unsigned char c;
11515
11516- asm volatile(_ASM_SUB "%2,%0; sete %1"
11517+ asm volatile(_ASM_SUB "%2,%0\n"
11518+
11519+#ifdef CONFIG_PAX_REFCOUNT
11520+ "jno 0f\n"
11521+ _ASM_ADD "%2,%0\n"
11522+ "int $4\n0:\n"
11523+ _ASM_EXTABLE(0b, 0b)
11524+#endif
11525+
11526+ "sete %1\n"
11527 : "+m" (l->a.counter), "=qm" (c)
11528 : "ir" (i) : "memory");
11529 return c;
11530@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
11531 {
11532 unsigned char c;
11533
11534- asm volatile(_ASM_DEC "%0; sete %1"
11535+ asm volatile(_ASM_DEC "%0\n"
11536+
11537+#ifdef CONFIG_PAX_REFCOUNT
11538+ "jno 0f\n"
11539+ _ASM_INC "%0\n"
11540+ "int $4\n0:\n"
11541+ _ASM_EXTABLE(0b, 0b)
11542+#endif
11543+
11544+ "sete %1\n"
11545 : "+m" (l->a.counter), "=qm" (c)
11546 : : "memory");
11547 return c != 0;
11548@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
11549 {
11550 unsigned char c;
11551
11552- asm volatile(_ASM_INC "%0; sete %1"
11553+ asm volatile(_ASM_INC "%0\n"
11554+
11555+#ifdef CONFIG_PAX_REFCOUNT
11556+ "jno 0f\n"
11557+ _ASM_DEC "%0\n"
11558+ "int $4\n0:\n"
11559+ _ASM_EXTABLE(0b, 0b)
11560+#endif
11561+
11562+ "sete %1\n"
11563 : "+m" (l->a.counter), "=qm" (c)
11564 : : "memory");
11565 return c != 0;
11566@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
11567 {
11568 unsigned char c;
11569
11570- asm volatile(_ASM_ADD "%2,%0; sets %1"
11571+ asm volatile(_ASM_ADD "%2,%0\n"
11572+
11573+#ifdef CONFIG_PAX_REFCOUNT
11574+ "jno 0f\n"
11575+ _ASM_SUB "%2,%0\n"
11576+ "int $4\n0:\n"
11577+ _ASM_EXTABLE(0b, 0b)
11578+#endif
11579+
11580+ "sets %1\n"
11581 : "+m" (l->a.counter), "=qm" (c)
11582 : "ir" (i) : "memory");
11583 return c;
11584@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
11585 #endif
11586 /* Modern 486+ processor */
11587 __i = i;
11588- asm volatile(_ASM_XADD "%0, %1;"
11589+ asm volatile(_ASM_XADD "%0, %1\n"
11590+
11591+#ifdef CONFIG_PAX_REFCOUNT
11592+ "jno 0f\n"
11593+ _ASM_MOV "%0,%1\n"
11594+ "int $4\n0:\n"
11595+ _ASM_EXTABLE(0b, 0b)
11596+#endif
11597+
11598 : "+r" (i), "+m" (l->a.counter)
11599 : : "memory");
11600 return i + __i;
11601diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
11602index ef51b50..514ba37 100644
11603--- a/arch/x86/include/asm/microcode.h
11604+++ b/arch/x86/include/asm/microcode.h
11605@@ -12,13 +12,13 @@ struct device;
11606 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
11607
11608 struct microcode_ops {
11609- enum ucode_state (*request_microcode_user) (int cpu,
11610+ enum ucode_state (* const request_microcode_user) (int cpu,
11611 const void __user *buf, size_t size);
11612
11613- enum ucode_state (*request_microcode_fw) (int cpu,
11614+ enum ucode_state (* const request_microcode_fw) (int cpu,
11615 struct device *device);
11616
11617- void (*microcode_fini_cpu) (int cpu);
11618+ void (* const microcode_fini_cpu) (int cpu);
11619
11620 /*
11621 * The generic 'microcode_core' part guarantees that
11622@@ -38,18 +38,18 @@ struct ucode_cpu_info {
11623 extern struct ucode_cpu_info ucode_cpu_info[];
11624
11625 #ifdef CONFIG_MICROCODE_INTEL
11626-extern struct microcode_ops * __init init_intel_microcode(void);
11627+extern const struct microcode_ops * __init init_intel_microcode(void);
11628 #else
11629-static inline struct microcode_ops * __init init_intel_microcode(void)
11630+static inline const struct microcode_ops * __init init_intel_microcode(void)
11631 {
11632 return NULL;
11633 }
11634 #endif /* CONFIG_MICROCODE_INTEL */
11635
11636 #ifdef CONFIG_MICROCODE_AMD
11637-extern struct microcode_ops * __init init_amd_microcode(void);
11638+extern const struct microcode_ops * __init init_amd_microcode(void);
11639 #else
11640-static inline struct microcode_ops * __init init_amd_microcode(void)
11641+static inline const struct microcode_ops * __init init_amd_microcode(void)
11642 {
11643 return NULL;
11644 }
11645diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11646index 593e51d..fa69c9a 100644
11647--- a/arch/x86/include/asm/mman.h
11648+++ b/arch/x86/include/asm/mman.h
11649@@ -5,4 +5,14 @@
11650
11651 #include <asm-generic/mman.h>
11652
11653+#ifdef __KERNEL__
11654+#ifndef __ASSEMBLY__
11655+#ifdef CONFIG_X86_32
11656+#define arch_mmap_check i386_mmap_check
11657+int i386_mmap_check(unsigned long addr, unsigned long len,
11658+ unsigned long flags);
11659+#endif
11660+#endif
11661+#endif
11662+
11663 #endif /* _ASM_X86_MMAN_H */
11664diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11665index 80a1dee..239c67d 100644
11666--- a/arch/x86/include/asm/mmu.h
11667+++ b/arch/x86/include/asm/mmu.h
11668@@ -9,10 +9,23 @@
11669 * we put the segment information here.
11670 */
11671 typedef struct {
11672- void *ldt;
11673+ struct desc_struct *ldt;
11674 int size;
11675 struct mutex lock;
11676- void *vdso;
11677+ unsigned long vdso;
11678+
11679+#ifdef CONFIG_X86_32
11680+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11681+ unsigned long user_cs_base;
11682+ unsigned long user_cs_limit;
11683+
11684+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11685+ cpumask_t cpu_user_cs_mask;
11686+#endif
11687+
11688+#endif
11689+#endif
11690+
11691 } mm_context_t;
11692
11693 #ifdef CONFIG_SMP
11694diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11695index 8b5393e..8143173 100644
11696--- a/arch/x86/include/asm/mmu_context.h
11697+++ b/arch/x86/include/asm/mmu_context.h
11698@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11699
11700 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11701 {
11702+
11703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11704+ unsigned int i;
11705+ pgd_t *pgd;
11706+
11707+ pax_open_kernel();
11708+ pgd = get_cpu_pgd(smp_processor_id());
11709+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11710+ set_pgd_batched(pgd+i, native_make_pgd(0));
11711+ pax_close_kernel();
11712+#endif
11713+
11714 #ifdef CONFIG_SMP
11715 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11716 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11717@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11718 struct task_struct *tsk)
11719 {
11720 unsigned cpu = smp_processor_id();
11721+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
11722+ int tlbstate = TLBSTATE_OK;
11723+#endif
11724
11725 if (likely(prev != next)) {
11726 #ifdef CONFIG_SMP
11727+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11728+ tlbstate = percpu_read(cpu_tlbstate.state);
11729+#endif
11730 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11731 percpu_write(cpu_tlbstate.active_mm, next);
11732 #endif
11733 cpumask_set_cpu(cpu, mm_cpumask(next));
11734
11735 /* Re-load page tables */
11736+#ifdef CONFIG_PAX_PER_CPU_PGD
11737+ pax_open_kernel();
11738+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11739+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11740+ pax_close_kernel();
11741+ load_cr3(get_cpu_pgd(cpu));
11742+#else
11743 load_cr3(next->pgd);
11744+#endif
11745
11746 /* stop flush ipis for the previous mm */
11747 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11748@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11749 */
11750 if (unlikely(prev->context.ldt != next->context.ldt))
11751 load_LDT_nolock(&next->context);
11752- }
11753+
11754+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11755+ if (!nx_enabled) {
11756+ smp_mb__before_clear_bit();
11757+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11758+ smp_mb__after_clear_bit();
11759+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11760+ }
11761+#endif
11762+
11763+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11764+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11765+ prev->context.user_cs_limit != next->context.user_cs_limit))
11766+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11767 #ifdef CONFIG_SMP
11768+ else if (unlikely(tlbstate != TLBSTATE_OK))
11769+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11770+#endif
11771+#endif
11772+
11773+ }
11774 else {
11775+
11776+#ifdef CONFIG_PAX_PER_CPU_PGD
11777+ pax_open_kernel();
11778+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11779+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11780+ pax_close_kernel();
11781+ load_cr3(get_cpu_pgd(cpu));
11782+#endif
11783+
11784+#ifdef CONFIG_SMP
11785 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11786 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11787
11788@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11789 * tlb flush IPI delivery. We must reload CR3
11790 * to make sure to use no freed page tables.
11791 */
11792+
11793+#ifndef CONFIG_PAX_PER_CPU_PGD
11794 load_cr3(next->pgd);
11795+#endif
11796+
11797 load_LDT_nolock(&next->context);
11798+
11799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11800+ if (!nx_enabled)
11801+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11802+#endif
11803+
11804+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11805+#ifdef CONFIG_PAX_PAGEEXEC
11806+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
11807+#endif
11808+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11809+#endif
11810+
11811 }
11812+#endif
11813 }
11814-#endif
11815 }
11816
11817 #define activate_mm(prev, next) \
11818diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11819index 3e2ce58..caaf478 100644
11820--- a/arch/x86/include/asm/module.h
11821+++ b/arch/x86/include/asm/module.h
11822@@ -5,6 +5,7 @@
11823
11824 #ifdef CONFIG_X86_64
11825 /* X86_64 does not define MODULE_PROC_FAMILY */
11826+#define MODULE_PROC_FAMILY ""
11827 #elif defined CONFIG_M386
11828 #define MODULE_PROC_FAMILY "386 "
11829 #elif defined CONFIG_M486
11830@@ -59,13 +60,26 @@
11831 #error unknown processor family
11832 #endif
11833
11834-#ifdef CONFIG_X86_32
11835-# ifdef CONFIG_4KSTACKS
11836-# define MODULE_STACKSIZE "4KSTACKS "
11837-# else
11838-# define MODULE_STACKSIZE ""
11839-# endif
11840-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
11841+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
11842+#define MODULE_STACKSIZE "4KSTACKS "
11843+#else
11844+#define MODULE_STACKSIZE ""
11845 #endif
11846
11847+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11848+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11849+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11850+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11851+#else
11852+#define MODULE_PAX_KERNEXEC ""
11853+#endif
11854+
11855+#ifdef CONFIG_PAX_MEMORY_UDEREF
11856+#define MODULE_PAX_UDEREF "UDEREF "
11857+#else
11858+#define MODULE_PAX_UDEREF ""
11859+#endif
11860+
11861+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11862+
11863 #endif /* _ASM_X86_MODULE_H */
11864diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11865index 7639dbf..e08a58c 100644
11866--- a/arch/x86/include/asm/page_64_types.h
11867+++ b/arch/x86/include/asm/page_64_types.h
11868@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11869
11870 /* duplicated to the one in bootmem.h */
11871 extern unsigned long max_pfn;
11872-extern unsigned long phys_base;
11873+extern const unsigned long phys_base;
11874
11875 extern unsigned long __phys_addr(unsigned long);
11876 #define __phys_reloc_hide(x) (x)
11877diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11878index efb3899..ef30687 100644
11879--- a/arch/x86/include/asm/paravirt.h
11880+++ b/arch/x86/include/asm/paravirt.h
11881@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11882 val);
11883 }
11884
11885+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11886+{
11887+ pgdval_t val = native_pgd_val(pgd);
11888+
11889+ if (sizeof(pgdval_t) > sizeof(long))
11890+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11891+ val, (u64)val >> 32);
11892+ else
11893+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11894+ val);
11895+}
11896+
11897 static inline void pgd_clear(pgd_t *pgdp)
11898 {
11899 set_pgd(pgdp, __pgd(0));
11900@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11901 pv_mmu_ops.set_fixmap(idx, phys, flags);
11902 }
11903
11904+#ifdef CONFIG_PAX_KERNEXEC
11905+static inline unsigned long pax_open_kernel(void)
11906+{
11907+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11908+}
11909+
11910+static inline unsigned long pax_close_kernel(void)
11911+{
11912+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11913+}
11914+#else
11915+static inline unsigned long pax_open_kernel(void) { return 0; }
11916+static inline unsigned long pax_close_kernel(void) { return 0; }
11917+#endif
11918+
11919 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11920
11921 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11922@@ -945,7 +972,7 @@ extern void default_banner(void);
11923
11924 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11925 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11926-#define PARA_INDIRECT(addr) *%cs:addr
11927+#define PARA_INDIRECT(addr) *%ss:addr
11928 #endif
11929
11930 #define INTERRUPT_RETURN \
11931@@ -1022,6 +1049,21 @@ extern void default_banner(void);
11932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11933 CLBR_NONE, \
11934 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11935+
11936+#define GET_CR0_INTO_RDI \
11937+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11938+ mov %rax,%rdi
11939+
11940+#define SET_RDI_INTO_CR0 \
11941+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11942+
11943+#define GET_CR3_INTO_RDI \
11944+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11945+ mov %rax,%rdi
11946+
11947+#define SET_RDI_INTO_CR3 \
11948+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11949+
11950 #endif /* CONFIG_X86_32 */
11951
11952 #endif /* __ASSEMBLY__ */
11953diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11954index 9357473..aeb2de5 100644
11955--- a/arch/x86/include/asm/paravirt_types.h
11956+++ b/arch/x86/include/asm/paravirt_types.h
11957@@ -78,19 +78,19 @@ struct pv_init_ops {
11958 */
11959 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11960 unsigned long addr, unsigned len);
11961-};
11962+} __no_const;
11963
11964
11965 struct pv_lazy_ops {
11966 /* Set deferred update mode, used for batching operations. */
11967 void (*enter)(void);
11968 void (*leave)(void);
11969-};
11970+} __no_const;
11971
11972 struct pv_time_ops {
11973 unsigned long long (*sched_clock)(void);
11974 unsigned long (*get_tsc_khz)(void);
11975-};
11976+} __no_const;
11977
11978 struct pv_cpu_ops {
11979 /* hooks for various privileged instructions */
11980@@ -186,7 +186,7 @@ struct pv_cpu_ops {
11981
11982 void (*start_context_switch)(struct task_struct *prev);
11983 void (*end_context_switch)(struct task_struct *next);
11984-};
11985+} __no_const;
11986
11987 struct pv_irq_ops {
11988 /*
11989@@ -217,7 +217,7 @@ struct pv_apic_ops {
11990 unsigned long start_eip,
11991 unsigned long start_esp);
11992 #endif
11993-};
11994+} __no_const;
11995
11996 struct pv_mmu_ops {
11997 unsigned long (*read_cr2)(void);
11998@@ -301,6 +301,7 @@ struct pv_mmu_ops {
11999 struct paravirt_callee_save make_pud;
12000
12001 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12002+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12003 #endif /* PAGETABLE_LEVELS == 4 */
12004 #endif /* PAGETABLE_LEVELS >= 3 */
12005
12006@@ -316,6 +317,12 @@ struct pv_mmu_ops {
12007 an mfn. We can tell which is which from the index. */
12008 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12009 phys_addr_t phys, pgprot_t flags);
12010+
12011+#ifdef CONFIG_PAX_KERNEXEC
12012+ unsigned long (*pax_open_kernel)(void);
12013+ unsigned long (*pax_close_kernel)(void);
12014+#endif
12015+
12016 };
12017
12018 struct raw_spinlock;
12019@@ -326,7 +333,7 @@ struct pv_lock_ops {
12020 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
12021 int (*spin_trylock)(struct raw_spinlock *lock);
12022 void (*spin_unlock)(struct raw_spinlock *lock);
12023-};
12024+} __no_const;
12025
12026 /* This contains all the paravirt structures: we get a convenient
12027 * number for each function using the offset which we use to indicate
12028diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
12029index b399988..3f47c38 100644
12030--- a/arch/x86/include/asm/pci_x86.h
12031+++ b/arch/x86/include/asm/pci_x86.h
12032@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
12033 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
12034
12035 struct pci_raw_ops {
12036- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12037+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12038 int reg, int len, u32 *val);
12039- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12040+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12041 int reg, int len, u32 val);
12042 };
12043
12044-extern struct pci_raw_ops *raw_pci_ops;
12045-extern struct pci_raw_ops *raw_pci_ext_ops;
12046+extern const struct pci_raw_ops *raw_pci_ops;
12047+extern const struct pci_raw_ops *raw_pci_ext_ops;
12048
12049-extern struct pci_raw_ops pci_direct_conf1;
12050+extern const struct pci_raw_ops pci_direct_conf1;
12051 extern bool port_cf9_safe;
12052
12053 /* arch_initcall level */
12054diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
12055index b65a36d..50345a4 100644
12056--- a/arch/x86/include/asm/percpu.h
12057+++ b/arch/x86/include/asm/percpu.h
12058@@ -78,6 +78,7 @@ do { \
12059 if (0) { \
12060 T__ tmp__; \
12061 tmp__ = (val); \
12062+ (void)tmp__; \
12063 } \
12064 switch (sizeof(var)) { \
12065 case 1: \
12066diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12067index 271de94..ef944d6 100644
12068--- a/arch/x86/include/asm/pgalloc.h
12069+++ b/arch/x86/include/asm/pgalloc.h
12070@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12071 pmd_t *pmd, pte_t *pte)
12072 {
12073 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12074+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12075+}
12076+
12077+static inline void pmd_populate_user(struct mm_struct *mm,
12078+ pmd_t *pmd, pte_t *pte)
12079+{
12080+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12081 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12082 }
12083
12084diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12085index 2334982..70bc412 100644
12086--- a/arch/x86/include/asm/pgtable-2level.h
12087+++ b/arch/x86/include/asm/pgtable-2level.h
12088@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12089
12090 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12091 {
12092+ pax_open_kernel();
12093 *pmdp = pmd;
12094+ pax_close_kernel();
12095 }
12096
12097 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12098diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12099index 33927d2..ccde329 100644
12100--- a/arch/x86/include/asm/pgtable-3level.h
12101+++ b/arch/x86/include/asm/pgtable-3level.h
12102@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12103
12104 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12105 {
12106+ pax_open_kernel();
12107 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12108+ pax_close_kernel();
12109 }
12110
12111 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12112 {
12113+ pax_open_kernel();
12114 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12115+ pax_close_kernel();
12116 }
12117
12118 /*
12119diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12120index af6fd36..867ff74 100644
12121--- a/arch/x86/include/asm/pgtable.h
12122+++ b/arch/x86/include/asm/pgtable.h
12123@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
12124
12125 #ifndef __PAGETABLE_PUD_FOLDED
12126 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12127+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12128 #define pgd_clear(pgd) native_pgd_clear(pgd)
12129 #endif
12130
12131@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
12132
12133 #define arch_end_context_switch(prev) do {} while(0)
12134
12135+#define pax_open_kernel() native_pax_open_kernel()
12136+#define pax_close_kernel() native_pax_close_kernel()
12137 #endif /* CONFIG_PARAVIRT */
12138
12139+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12140+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12141+
12142+#ifdef CONFIG_PAX_KERNEXEC
12143+static inline unsigned long native_pax_open_kernel(void)
12144+{
12145+ unsigned long cr0;
12146+
12147+ preempt_disable();
12148+ barrier();
12149+ cr0 = read_cr0() ^ X86_CR0_WP;
12150+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
12151+ write_cr0(cr0);
12152+ return cr0 ^ X86_CR0_WP;
12153+}
12154+
12155+static inline unsigned long native_pax_close_kernel(void)
12156+{
12157+ unsigned long cr0;
12158+
12159+ cr0 = read_cr0() ^ X86_CR0_WP;
12160+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12161+ write_cr0(cr0);
12162+ barrier();
12163+ preempt_enable_no_resched();
12164+ return cr0 ^ X86_CR0_WP;
12165+}
12166+#else
12167+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12168+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12169+#endif
12170+
12171 /*
12172 * The following only work if pte_present() is true.
12173 * Undefined behaviour if not..
12174 */
12175+static inline int pte_user(pte_t pte)
12176+{
12177+ return pte_val(pte) & _PAGE_USER;
12178+}
12179+
12180 static inline int pte_dirty(pte_t pte)
12181 {
12182 return pte_flags(pte) & _PAGE_DIRTY;
12183@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12184 return pte_clear_flags(pte, _PAGE_RW);
12185 }
12186
12187+static inline pte_t pte_mkread(pte_t pte)
12188+{
12189+ return __pte(pte_val(pte) | _PAGE_USER);
12190+}
12191+
12192 static inline pte_t pte_mkexec(pte_t pte)
12193 {
12194- return pte_clear_flags(pte, _PAGE_NX);
12195+#ifdef CONFIG_X86_PAE
12196+ if (__supported_pte_mask & _PAGE_NX)
12197+ return pte_clear_flags(pte, _PAGE_NX);
12198+ else
12199+#endif
12200+ return pte_set_flags(pte, _PAGE_USER);
12201+}
12202+
12203+static inline pte_t pte_exprotect(pte_t pte)
12204+{
12205+#ifdef CONFIG_X86_PAE
12206+ if (__supported_pte_mask & _PAGE_NX)
12207+ return pte_set_flags(pte, _PAGE_NX);
12208+ else
12209+#endif
12210+ return pte_clear_flags(pte, _PAGE_USER);
12211 }
12212
12213 static inline pte_t pte_mkdirty(pte_t pte)
12214@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12215 #endif
12216
12217 #ifndef __ASSEMBLY__
12218+
12219+#ifdef CONFIG_PAX_PER_CPU_PGD
12220+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12221+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12222+{
12223+ return cpu_pgd[cpu];
12224+}
12225+#endif
12226+
12227 #include <linux/mm_types.h>
12228
12229 static inline int pte_none(pte_t pte)
12230@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12231
12232 static inline int pgd_bad(pgd_t pgd)
12233 {
12234- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12235+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12236 }
12237
12238 static inline int pgd_none(pgd_t pgd)
12239@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
12240 * pgd_offset() returns a (pgd_t *)
12241 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12242 */
12243-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12244+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12245+
12246+#ifdef CONFIG_PAX_PER_CPU_PGD
12247+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12248+#endif
12249+
12250 /*
12251 * a shortcut which implies the use of the kernel's pgd, instead
12252 * of a process's
12253@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
12254 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12255 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12256
12257+#ifdef CONFIG_X86_32
12258+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12259+#else
12260+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12261+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12262+
12263+#ifdef CONFIG_PAX_MEMORY_UDEREF
12264+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12265+#else
12266+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12267+#endif
12268+
12269+#endif
12270+
12271 #ifndef __ASSEMBLY__
12272
12273 extern int direct_gbpages;
12274@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
12275 * dst and src can be on the same page, but the range must not overlap,
12276 * and must not cross a page boundary.
12277 */
12278-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12279+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12280 {
12281- memcpy(dst, src, count * sizeof(pgd_t));
12282+ pax_open_kernel();
12283+ while (count--)
12284+ *dst++ = *src++;
12285+ pax_close_kernel();
12286 }
12287
12288+#ifdef CONFIG_PAX_PER_CPU_PGD
12289+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12290+#endif
12291+
12292+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12293+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12294+#else
12295+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
12296+#endif
12297
12298 #include <asm-generic/pgtable.h>
12299 #endif /* __ASSEMBLY__ */
12300diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12301index 750f1bf..971e839 100644
12302--- a/arch/x86/include/asm/pgtable_32.h
12303+++ b/arch/x86/include/asm/pgtable_32.h
12304@@ -26,9 +26,6 @@
12305 struct mm_struct;
12306 struct vm_area_struct;
12307
12308-extern pgd_t swapper_pg_dir[1024];
12309-extern pgd_t trampoline_pg_dir[1024];
12310-
12311 static inline void pgtable_cache_init(void) { }
12312 static inline void check_pgt_cache(void) { }
12313 void paging_init(void);
12314@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12315 # include <asm/pgtable-2level.h>
12316 #endif
12317
12318+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12319+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
12320+#ifdef CONFIG_X86_PAE
12321+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12322+#endif
12323+
12324 #if defined(CONFIG_HIGHPTE)
12325 #define __KM_PTE \
12326 (in_nmi() ? KM_NMI_PTE : \
12327@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12328 /* Clear a kernel PTE and flush it from the TLB */
12329 #define kpte_clear_flush(ptep, vaddr) \
12330 do { \
12331+ pax_open_kernel(); \
12332 pte_clear(&init_mm, (vaddr), (ptep)); \
12333+ pax_close_kernel(); \
12334 __flush_tlb_one((vaddr)); \
12335 } while (0)
12336
12337@@ -85,6 +90,9 @@ do { \
12338
12339 #endif /* !__ASSEMBLY__ */
12340
12341+#define HAVE_ARCH_UNMAPPED_AREA
12342+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12343+
12344 /*
12345 * kern_addr_valid() is (1) for FLATMEM and (0) for
12346 * SPARSEMEM and DISCONTIGMEM
12347diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12348index 5e67c15..12d5c47 100644
12349--- a/arch/x86/include/asm/pgtable_32_types.h
12350+++ b/arch/x86/include/asm/pgtable_32_types.h
12351@@ -8,7 +8,7 @@
12352 */
12353 #ifdef CONFIG_X86_PAE
12354 # include <asm/pgtable-3level_types.h>
12355-# define PMD_SIZE (1UL << PMD_SHIFT)
12356+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12357 # define PMD_MASK (~(PMD_SIZE - 1))
12358 #else
12359 # include <asm/pgtable-2level_types.h>
12360@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12361 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12362 #endif
12363
12364+#ifdef CONFIG_PAX_KERNEXEC
12365+#ifndef __ASSEMBLY__
12366+extern unsigned char MODULES_EXEC_VADDR[];
12367+extern unsigned char MODULES_EXEC_END[];
12368+#endif
12369+#include <asm/boot.h>
12370+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12371+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12372+#else
12373+#define ktla_ktva(addr) (addr)
12374+#define ktva_ktla(addr) (addr)
12375+#endif
12376+
12377 #define MODULES_VADDR VMALLOC_START
12378 #define MODULES_END VMALLOC_END
12379 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12380diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12381index c57a301..6b414ff 100644
12382--- a/arch/x86/include/asm/pgtable_64.h
12383+++ b/arch/x86/include/asm/pgtable_64.h
12384@@ -16,10 +16,14 @@
12385
12386 extern pud_t level3_kernel_pgt[512];
12387 extern pud_t level3_ident_pgt[512];
12388+extern pud_t level3_vmalloc_start_pgt[512];
12389+extern pud_t level3_vmalloc_end_pgt[512];
12390+extern pud_t level3_vmemmap_pgt[512];
12391+extern pud_t level2_vmemmap_pgt[512];
12392 extern pmd_t level2_kernel_pgt[512];
12393 extern pmd_t level2_fixmap_pgt[512];
12394-extern pmd_t level2_ident_pgt[512];
12395-extern pgd_t init_level4_pgt[];
12396+extern pmd_t level2_ident_pgt[512*2];
12397+extern pgd_t init_level4_pgt[512];
12398
12399 #define swapper_pg_dir init_level4_pgt
12400
12401@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
12402
12403 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12404 {
12405+ pax_open_kernel();
12406 *pmdp = pmd;
12407+ pax_close_kernel();
12408 }
12409
12410 static inline void native_pmd_clear(pmd_t *pmd)
12411@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
12412
12413 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12414 {
12415+ pax_open_kernel();
12416+ *pgdp = pgd;
12417+ pax_close_kernel();
12418+}
12419+
12420+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12421+{
12422 *pgdp = pgd;
12423 }
12424
12425diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12426index 766ea16..5b96cb3 100644
12427--- a/arch/x86/include/asm/pgtable_64_types.h
12428+++ b/arch/x86/include/asm/pgtable_64_types.h
12429@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12430 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12431 #define MODULES_END _AC(0xffffffffff000000, UL)
12432 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12433+#define MODULES_EXEC_VADDR MODULES_VADDR
12434+#define MODULES_EXEC_END MODULES_END
12435+
12436+#define ktla_ktva(addr) (addr)
12437+#define ktva_ktla(addr) (addr)
12438
12439 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12440diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12441index d1f4a76..2f46ba1 100644
12442--- a/arch/x86/include/asm/pgtable_types.h
12443+++ b/arch/x86/include/asm/pgtable_types.h
12444@@ -16,12 +16,11 @@
12445 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12446 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12447 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12448-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12449+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12450 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12451 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12452 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12453-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12454-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12455+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12456 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12457
12458 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12459@@ -39,7 +38,6 @@
12460 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12461 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12462 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12463-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12464 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12465 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12466 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12467@@ -55,8 +53,10 @@
12468
12469 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12470 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12471-#else
12472+#elif defined(CONFIG_KMEMCHECK)
12473 #define _PAGE_NX (_AT(pteval_t, 0))
12474+#else
12475+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12476 #endif
12477
12478 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12479@@ -93,6 +93,9 @@
12480 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12481 _PAGE_ACCESSED)
12482
12483+#define PAGE_READONLY_NOEXEC PAGE_READONLY
12484+#define PAGE_SHARED_NOEXEC PAGE_SHARED
12485+
12486 #define __PAGE_KERNEL_EXEC \
12487 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12488 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12489@@ -103,8 +106,8 @@
12490 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12491 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12492 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12493-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12494-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
12495+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12496+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
12497 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12498 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
12499 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
12500@@ -163,8 +166,8 @@
12501 * bits are combined, this will alow user to access the high address mapped
12502 * VDSO in the presence of CONFIG_COMPAT_VDSO
12503 */
12504-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12505-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12506+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12507+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12508 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12509 #endif
12510
12511@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12512 {
12513 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12514 }
12515+#endif
12516
12517+#if PAGETABLE_LEVELS == 3
12518+#include <asm-generic/pgtable-nopud.h>
12519+#endif
12520+
12521+#if PAGETABLE_LEVELS == 2
12522+#include <asm-generic/pgtable-nopmd.h>
12523+#endif
12524+
12525+#ifndef __ASSEMBLY__
12526 #if PAGETABLE_LEVELS > 3
12527 typedef struct { pudval_t pud; } pud_t;
12528
12529@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12530 return pud.pud;
12531 }
12532 #else
12533-#include <asm-generic/pgtable-nopud.h>
12534-
12535 static inline pudval_t native_pud_val(pud_t pud)
12536 {
12537 return native_pgd_val(pud.pgd);
12538@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12539 return pmd.pmd;
12540 }
12541 #else
12542-#include <asm-generic/pgtable-nopmd.h>
12543-
12544 static inline pmdval_t native_pmd_val(pmd_t pmd)
12545 {
12546 return native_pgd_val(pmd.pud.pgd);
12547@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
12548
12549 extern pteval_t __supported_pte_mask;
12550 extern void set_nx(void);
12551+
12552+#ifdef CONFIG_X86_32
12553+#ifdef CONFIG_X86_PAE
12554 extern int nx_enabled;
12555+#else
12556+#define nx_enabled (0)
12557+#endif
12558+#else
12559+#define nx_enabled (1)
12560+#endif
12561
12562 #define pgprot_writecombine pgprot_writecombine
12563 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12564diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12565index fa04dea..5f823fc 100644
12566--- a/arch/x86/include/asm/processor.h
12567+++ b/arch/x86/include/asm/processor.h
12568@@ -272,7 +272,7 @@ struct tss_struct {
12569
12570 } ____cacheline_aligned;
12571
12572-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12573+extern struct tss_struct init_tss[NR_CPUS];
12574
12575 /*
12576 * Save the original ist values for checking stack pointers during debugging
12577@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
12578 */
12579 #define TASK_SIZE PAGE_OFFSET
12580 #define TASK_SIZE_MAX TASK_SIZE
12581+
12582+#ifdef CONFIG_PAX_SEGMEXEC
12583+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12584+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12585+#else
12586 #define STACK_TOP TASK_SIZE
12587-#define STACK_TOP_MAX STACK_TOP
12588+#endif
12589+
12590+#define STACK_TOP_MAX TASK_SIZE
12591
12592 #define INIT_THREAD { \
12593- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12594+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12595 .vm86_info = NULL, \
12596 .sysenter_cs = __KERNEL_CS, \
12597 .io_bitmap_ptr = NULL, \
12598@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
12599 */
12600 #define INIT_TSS { \
12601 .x86_tss = { \
12602- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12603+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12604 .ss0 = __KERNEL_DS, \
12605 .ss1 = __KERNEL_CS, \
12606 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12607@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
12608 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12609
12610 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12611-#define KSTK_TOP(info) \
12612-({ \
12613- unsigned long *__ptr = (unsigned long *)(info); \
12614- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12615-})
12616+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12617
12618 /*
12619 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12620@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12621 #define task_pt_regs(task) \
12622 ({ \
12623 struct pt_regs *__regs__; \
12624- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12625+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12626 __regs__ - 1; \
12627 })
12628
12629@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12630 /*
12631 * User space process size. 47bits minus one guard page.
12632 */
12633-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12634+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12635
12636 /* This decides where the kernel will search for a free chunk of vm
12637 * space during mmap's.
12638 */
12639 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12640- 0xc0000000 : 0xFFFFe000)
12641+ 0xc0000000 : 0xFFFFf000)
12642
12643 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
12644 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12645@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12646 #define STACK_TOP_MAX TASK_SIZE_MAX
12647
12648 #define INIT_THREAD { \
12649- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12650+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12651 }
12652
12653 #define INIT_TSS { \
12654- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12655+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12656 }
12657
12658 /*
12659@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12660 */
12661 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12662
12663+#ifdef CONFIG_PAX_SEGMEXEC
12664+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12665+#endif
12666+
12667 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12668
12669 /* Get/set a process' ability to use the timestamp counter instruction */
12670diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12671index 0f0d908..f2e3da2 100644
12672--- a/arch/x86/include/asm/ptrace.h
12673+++ b/arch/x86/include/asm/ptrace.h
12674@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12675 }
12676
12677 /*
12678- * user_mode_vm(regs) determines whether a register set came from user mode.
12679+ * user_mode(regs) determines whether a register set came from user mode.
12680 * This is true if V8086 mode was enabled OR if the register set was from
12681 * protected mode with RPL-3 CS value. This tricky test checks that with
12682 * one comparison. Many places in the kernel can bypass this full check
12683- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12684+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12685+ * be used.
12686 */
12687-static inline int user_mode(struct pt_regs *regs)
12688+static inline int user_mode_novm(struct pt_regs *regs)
12689 {
12690 #ifdef CONFIG_X86_32
12691 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12692 #else
12693- return !!(regs->cs & 3);
12694+ return !!(regs->cs & SEGMENT_RPL_MASK);
12695 #endif
12696 }
12697
12698-static inline int user_mode_vm(struct pt_regs *regs)
12699+static inline int user_mode(struct pt_regs *regs)
12700 {
12701 #ifdef CONFIG_X86_32
12702 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12703 USER_RPL;
12704 #else
12705- return user_mode(regs);
12706+ return user_mode_novm(regs);
12707 #endif
12708 }
12709
12710diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12711index 562d4fd..6e39df1 100644
12712--- a/arch/x86/include/asm/reboot.h
12713+++ b/arch/x86/include/asm/reboot.h
12714@@ -6,19 +6,19 @@
12715 struct pt_regs;
12716
12717 struct machine_ops {
12718- void (*restart)(char *cmd);
12719- void (*halt)(void);
12720- void (*power_off)(void);
12721+ void (* __noreturn restart)(char *cmd);
12722+ void (* __noreturn halt)(void);
12723+ void (* __noreturn power_off)(void);
12724 void (*shutdown)(void);
12725 void (*crash_shutdown)(struct pt_regs *);
12726- void (*emergency_restart)(void);
12727-};
12728+ void (* __noreturn emergency_restart)(void);
12729+} __no_const;
12730
12731 extern struct machine_ops machine_ops;
12732
12733 void native_machine_crash_shutdown(struct pt_regs *regs);
12734 void native_machine_shutdown(void);
12735-void machine_real_restart(const unsigned char *code, int length);
12736+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
12737
12738 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
12739 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
12740diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12741index 606ede1..dbfff37 100644
12742--- a/arch/x86/include/asm/rwsem.h
12743+++ b/arch/x86/include/asm/rwsem.h
12744@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12745 {
12746 asm volatile("# beginning down_read\n\t"
12747 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12748+
12749+#ifdef CONFIG_PAX_REFCOUNT
12750+ "jno 0f\n"
12751+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
12752+ "int $4\n0:\n"
12753+ _ASM_EXTABLE(0b, 0b)
12754+#endif
12755+
12756 /* adds 0x00000001, returns the old value */
12757 " jns 1f\n"
12758 " call call_rwsem_down_read_failed\n"
12759@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12760 "1:\n\t"
12761 " mov %1,%2\n\t"
12762 " add %3,%2\n\t"
12763+
12764+#ifdef CONFIG_PAX_REFCOUNT
12765+ "jno 0f\n"
12766+ "sub %3,%2\n"
12767+ "int $4\n0:\n"
12768+ _ASM_EXTABLE(0b, 0b)
12769+#endif
12770+
12771 " jle 2f\n\t"
12772 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12773 " jnz 1b\n\t"
12774@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12775 tmp = RWSEM_ACTIVE_WRITE_BIAS;
12776 asm volatile("# beginning down_write\n\t"
12777 LOCK_PREFIX " xadd %1,(%2)\n\t"
12778+
12779+#ifdef CONFIG_PAX_REFCOUNT
12780+ "jno 0f\n"
12781+ "mov %1,(%2)\n"
12782+ "int $4\n0:\n"
12783+ _ASM_EXTABLE(0b, 0b)
12784+#endif
12785+
12786 /* subtract 0x0000ffff, returns the old value */
12787 " test %1,%1\n\t"
12788 /* was the count 0 before? */
12789@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12790 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
12791 asm volatile("# beginning __up_read\n\t"
12792 LOCK_PREFIX " xadd %1,(%2)\n\t"
12793+
12794+#ifdef CONFIG_PAX_REFCOUNT
12795+ "jno 0f\n"
12796+ "mov %1,(%2)\n"
12797+ "int $4\n0:\n"
12798+ _ASM_EXTABLE(0b, 0b)
12799+#endif
12800+
12801 /* subtracts 1, returns the old value */
12802 " jns 1f\n\t"
12803 " call call_rwsem_wake\n"
12804@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12805 rwsem_count_t tmp;
12806 asm volatile("# beginning __up_write\n\t"
12807 LOCK_PREFIX " xadd %1,(%2)\n\t"
12808+
12809+#ifdef CONFIG_PAX_REFCOUNT
12810+ "jno 0f\n"
12811+ "mov %1,(%2)\n"
12812+ "int $4\n0:\n"
12813+ _ASM_EXTABLE(0b, 0b)
12814+#endif
12815+
12816 /* tries to transition
12817 0xffff0001 -> 0x00000000 */
12818 " jz 1f\n"
12819@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12820 {
12821 asm volatile("# beginning __downgrade_write\n\t"
12822 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12823+
12824+#ifdef CONFIG_PAX_REFCOUNT
12825+ "jno 0f\n"
12826+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12827+ "int $4\n0:\n"
12828+ _ASM_EXTABLE(0b, 0b)
12829+#endif
12830+
12831 /*
12832 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12833 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12834@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12835 static inline void rwsem_atomic_add(rwsem_count_t delta,
12836 struct rw_semaphore *sem)
12837 {
12838- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12839+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12840+
12841+#ifdef CONFIG_PAX_REFCOUNT
12842+ "jno 0f\n"
12843+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12844+ "int $4\n0:\n"
12845+ _ASM_EXTABLE(0b, 0b)
12846+#endif
12847+
12848 : "+m" (sem->count)
12849 : "er" (delta));
12850 }
12851@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
12852 {
12853 rwsem_count_t tmp = delta;
12854
12855- asm volatile(LOCK_PREFIX "xadd %0,%1"
12856+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
12857+
12858+#ifdef CONFIG_PAX_REFCOUNT
12859+ "jno 0f\n"
12860+ "mov %0,%1\n"
12861+ "int $4\n0:\n"
12862+ _ASM_EXTABLE(0b, 0b)
12863+#endif
12864+
12865 : "+r" (tmp), "+m" (sem->count)
12866 : : "memory");
12867
12868diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12869index 14e0ed8..7f7dd5e 100644
12870--- a/arch/x86/include/asm/segment.h
12871+++ b/arch/x86/include/asm/segment.h
12872@@ -62,10 +62,15 @@
12873 * 26 - ESPFIX small SS
12874 * 27 - per-cpu [ offset to per-cpu data area ]
12875 * 28 - stack_canary-20 [ for stack protector ]
12876- * 29 - unused
12877- * 30 - unused
12878+ * 29 - PCI BIOS CS
12879+ * 30 - PCI BIOS DS
12880 * 31 - TSS for double fault handler
12881 */
12882+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12883+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12884+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12885+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12886+
12887 #define GDT_ENTRY_TLS_MIN 6
12888 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12889
12890@@ -77,6 +82,8 @@
12891
12892 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
12893
12894+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12895+
12896 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12897
12898 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12899@@ -88,7 +95,7 @@
12900 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12901 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12902
12903-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12904+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12905 #ifdef CONFIG_SMP
12906 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12907 #else
12908@@ -102,6 +109,12 @@
12909 #define __KERNEL_STACK_CANARY 0
12910 #endif
12911
12912+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12913+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12914+
12915+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12916+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12917+
12918 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12919
12920 /*
12921@@ -139,7 +152,7 @@
12922 */
12923
12924 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12925-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12926+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12927
12928
12929 #else
12930@@ -163,6 +176,8 @@
12931 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12932 #define __USER32_DS __USER_DS
12933
12934+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12935+
12936 #define GDT_ENTRY_TSS 8 /* needs two entries */
12937 #define GDT_ENTRY_LDT 10 /* needs two entries */
12938 #define GDT_ENTRY_TLS_MIN 12
12939@@ -183,6 +198,7 @@
12940 #endif
12941
12942 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12943+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12944 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12945 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12946 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12947diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12948index 4c2f63c..5685db2 100644
12949--- a/arch/x86/include/asm/smp.h
12950+++ b/arch/x86/include/asm/smp.h
12951@@ -24,7 +24,7 @@ extern unsigned int num_processors;
12952 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12953 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12954 DECLARE_PER_CPU(u16, cpu_llc_id);
12955-DECLARE_PER_CPU(int, cpu_number);
12956+DECLARE_PER_CPU(unsigned int, cpu_number);
12957
12958 static inline struct cpumask *cpu_sibling_mask(int cpu)
12959 {
12960@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12961 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12962
12963 /* Static state in head.S used to set up a CPU */
12964-extern struct {
12965- void *sp;
12966- unsigned short ss;
12967-} stack_start;
12968+extern unsigned long stack_start; /* Initial stack pointer address */
12969
12970 struct smp_ops {
12971 void (*smp_prepare_boot_cpu)(void);
12972@@ -60,7 +57,7 @@ struct smp_ops {
12973
12974 void (*send_call_func_ipi)(const struct cpumask *mask);
12975 void (*send_call_func_single_ipi)(int cpu);
12976-};
12977+} __no_const;
12978
12979 /* Globals due to paravirt */
12980 extern void set_cpu_sibling_map(int cpu);
12981@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12982 extern int safe_smp_processor_id(void);
12983
12984 #elif defined(CONFIG_X86_64_SMP)
12985-#define raw_smp_processor_id() (percpu_read(cpu_number))
12986-
12987-#define stack_smp_processor_id() \
12988-({ \
12989- struct thread_info *ti; \
12990- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12991- ti->cpu; \
12992-})
12993+#define raw_smp_processor_id() (percpu_read(cpu_number))
12994+#define stack_smp_processor_id() raw_smp_processor_id()
12995 #define safe_smp_processor_id() smp_processor_id()
12996
12997 #endif
12998diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12999index 4e77853..4359783 100644
13000--- a/arch/x86/include/asm/spinlock.h
13001+++ b/arch/x86/include/asm/spinlock.h
13002@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
13003 static inline void __raw_read_lock(raw_rwlock_t *rw)
13004 {
13005 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
13006+
13007+#ifdef CONFIG_PAX_REFCOUNT
13008+ "jno 0f\n"
13009+ LOCK_PREFIX " addl $1,(%0)\n"
13010+ "int $4\n0:\n"
13011+ _ASM_EXTABLE(0b, 0b)
13012+#endif
13013+
13014 "jns 1f\n"
13015 "call __read_lock_failed\n\t"
13016 "1:\n"
13017@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
13018 static inline void __raw_write_lock(raw_rwlock_t *rw)
13019 {
13020 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
13021+
13022+#ifdef CONFIG_PAX_REFCOUNT
13023+ "jno 0f\n"
13024+ LOCK_PREFIX " addl %1,(%0)\n"
13025+ "int $4\n0:\n"
13026+ _ASM_EXTABLE(0b, 0b)
13027+#endif
13028+
13029 "jz 1f\n"
13030 "call __write_lock_failed\n\t"
13031 "1:\n"
13032@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
13033
13034 static inline void __raw_read_unlock(raw_rwlock_t *rw)
13035 {
13036- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
13037+ asm volatile(LOCK_PREFIX "incl %0\n"
13038+
13039+#ifdef CONFIG_PAX_REFCOUNT
13040+ "jno 0f\n"
13041+ LOCK_PREFIX "decl %0\n"
13042+ "int $4\n0:\n"
13043+ _ASM_EXTABLE(0b, 0b)
13044+#endif
13045+
13046+ :"+m" (rw->lock) : : "memory");
13047 }
13048
13049 static inline void __raw_write_unlock(raw_rwlock_t *rw)
13050 {
13051- asm volatile(LOCK_PREFIX "addl %1, %0"
13052+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
13053+
13054+#ifdef CONFIG_PAX_REFCOUNT
13055+ "jno 0f\n"
13056+ LOCK_PREFIX "subl %1, %0\n"
13057+ "int $4\n0:\n"
13058+ _ASM_EXTABLE(0b, 0b)
13059+#endif
13060+
13061 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
13062 }
13063
13064diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13065index 1575177..cb23f52 100644
13066--- a/arch/x86/include/asm/stackprotector.h
13067+++ b/arch/x86/include/asm/stackprotector.h
13068@@ -48,7 +48,7 @@
13069 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13070 */
13071 #define GDT_STACK_CANARY_INIT \
13072- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13073+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13074
13075 /*
13076 * Initialize the stackprotector canary value.
13077@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
13078
13079 static inline void load_stack_canary_segment(void)
13080 {
13081-#ifdef CONFIG_X86_32
13082+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13083 asm volatile ("mov %0, %%gs" : : "r" (0));
13084 #endif
13085 }
13086diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
13087index 1bb6e39..234246f 100644
13088--- a/arch/x86/include/asm/syscalls.h
13089+++ b/arch/x86/include/asm/syscalls.h
13090@@ -24,7 +24,7 @@ int sys_fork(struct pt_regs *);
13091 int sys_vfork(struct pt_regs *);
13092
13093 /* kernel/ldt.c */
13094-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
13095+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
13096
13097 /* kernel/signal.c */
13098 long sys_rt_sigreturn(struct pt_regs *);
13099diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
13100index e0fbf29..858ef4a 100644
13101--- a/arch/x86/include/asm/system.h
13102+++ b/arch/x86/include/asm/system.h
13103@@ -132,7 +132,7 @@ do { \
13104 "thread_return:\n\t" \
13105 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13106 __switch_canary \
13107- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13108+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13109 "movq %%rax,%%rdi\n\t" \
13110 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13111 "jnz ret_from_fork\n\t" \
13112@@ -143,7 +143,7 @@ do { \
13113 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13114 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13115 [_tif_fork] "i" (_TIF_FORK), \
13116- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13117+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
13118 [current_task] "m" (per_cpu_var(current_task)) \
13119 __switch_canary_iparam \
13120 : "memory", "cc" __EXTRA_CLOBBER)
13121@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
13122 {
13123 unsigned long __limit;
13124 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13125- return __limit + 1;
13126+ return __limit;
13127 }
13128
13129 static inline void native_clts(void)
13130@@ -340,12 +340,12 @@ void enable_hlt(void);
13131
13132 void cpu_idle_wait(void);
13133
13134-extern unsigned long arch_align_stack(unsigned long sp);
13135+#define arch_align_stack(x) ((x) & ~0xfUL)
13136 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13137
13138 void default_idle(void);
13139
13140-void stop_this_cpu(void *dummy);
13141+void stop_this_cpu(void *dummy) __noreturn;
13142
13143 /*
13144 * Force strict CPU ordering.
13145diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13146index 19c3ce4..8962535 100644
13147--- a/arch/x86/include/asm/thread_info.h
13148+++ b/arch/x86/include/asm/thread_info.h
13149@@ -10,6 +10,7 @@
13150 #include <linux/compiler.h>
13151 #include <asm/page.h>
13152 #include <asm/types.h>
13153+#include <asm/percpu.h>
13154
13155 /*
13156 * low level task data that entry.S needs immediate access to
13157@@ -24,7 +25,6 @@ struct exec_domain;
13158 #include <asm/atomic.h>
13159
13160 struct thread_info {
13161- struct task_struct *task; /* main task structure */
13162 struct exec_domain *exec_domain; /* execution domain */
13163 __u32 flags; /* low level flags */
13164 __u32 status; /* thread synchronous flags */
13165@@ -34,18 +34,12 @@ struct thread_info {
13166 mm_segment_t addr_limit;
13167 struct restart_block restart_block;
13168 void __user *sysenter_return;
13169-#ifdef CONFIG_X86_32
13170- unsigned long previous_esp; /* ESP of the previous stack in
13171- case of nested (IRQ) stacks
13172- */
13173- __u8 supervisor_stack[0];
13174-#endif
13175+ unsigned long lowest_stack;
13176 int uaccess_err;
13177 };
13178
13179-#define INIT_THREAD_INFO(tsk) \
13180+#define INIT_THREAD_INFO \
13181 { \
13182- .task = &tsk, \
13183 .exec_domain = &default_exec_domain, \
13184 .flags = 0, \
13185 .cpu = 0, \
13186@@ -56,7 +50,7 @@ struct thread_info {
13187 }, \
13188 }
13189
13190-#define init_thread_info (init_thread_union.thread_info)
13191+#define init_thread_info (init_thread_union.stack)
13192 #define init_stack (init_thread_union.stack)
13193
13194 #else /* !__ASSEMBLY__ */
13195@@ -163,45 +157,40 @@ struct thread_info {
13196 #define alloc_thread_info(tsk) \
13197 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
13198
13199-#ifdef CONFIG_X86_32
13200-
13201-#define STACK_WARN (THREAD_SIZE/8)
13202-/*
13203- * macros/functions for gaining access to the thread information structure
13204- *
13205- * preempt_count needs to be 1 initially, until the scheduler is functional.
13206- */
13207-#ifndef __ASSEMBLY__
13208-
13209-
13210-/* how to get the current stack pointer from C */
13211-register unsigned long current_stack_pointer asm("esp") __used;
13212-
13213-/* how to get the thread information struct from C */
13214-static inline struct thread_info *current_thread_info(void)
13215-{
13216- return (struct thread_info *)
13217- (current_stack_pointer & ~(THREAD_SIZE - 1));
13218-}
13219-
13220-#else /* !__ASSEMBLY__ */
13221-
13222+#ifdef __ASSEMBLY__
13223 /* how to get the thread information struct from ASM */
13224 #define GET_THREAD_INFO(reg) \
13225- movl $-THREAD_SIZE, reg; \
13226- andl %esp, reg
13227+ mov PER_CPU_VAR(current_tinfo), reg
13228
13229 /* use this one if reg already contains %esp */
13230-#define GET_THREAD_INFO_WITH_ESP(reg) \
13231- andl $-THREAD_SIZE, reg
13232+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13233+#else
13234+/* how to get the thread information struct from C */
13235+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13236+
13237+static __always_inline struct thread_info *current_thread_info(void)
13238+{
13239+ return percpu_read_stable(current_tinfo);
13240+}
13241+#endif
13242+
13243+#ifdef CONFIG_X86_32
13244+
13245+#define STACK_WARN (THREAD_SIZE/8)
13246+/*
13247+ * macros/functions for gaining access to the thread information structure
13248+ *
13249+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13250+ */
13251+#ifndef __ASSEMBLY__
13252+
13253+/* how to get the current stack pointer from C */
13254+register unsigned long current_stack_pointer asm("esp") __used;
13255
13256 #endif
13257
13258 #else /* X86_32 */
13259
13260-#include <asm/percpu.h>
13261-#define KERNEL_STACK_OFFSET (5*8)
13262-
13263 /*
13264 * macros/functions for gaining access to the thread information structure
13265 * preempt_count needs to be 1 initially, until the scheduler is functional.
13266@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
13267 #ifndef __ASSEMBLY__
13268 DECLARE_PER_CPU(unsigned long, kernel_stack);
13269
13270-static inline struct thread_info *current_thread_info(void)
13271-{
13272- struct thread_info *ti;
13273- ti = (void *)(percpu_read_stable(kernel_stack) +
13274- KERNEL_STACK_OFFSET - THREAD_SIZE);
13275- return ti;
13276-}
13277-
13278-#else /* !__ASSEMBLY__ */
13279-
13280-/* how to get the thread information struct from ASM */
13281-#define GET_THREAD_INFO(reg) \
13282- movq PER_CPU_VAR(kernel_stack),reg ; \
13283- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13284-
13285+/* how to get the current stack pointer from C */
13286+register unsigned long current_stack_pointer asm("rsp") __used;
13287 #endif
13288
13289 #endif /* !X86_32 */
13290@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
13291 extern void free_thread_info(struct thread_info *ti);
13292 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13293 #define arch_task_cache_init arch_task_cache_init
13294+
13295+#define __HAVE_THREAD_FUNCTIONS
13296+#define task_thread_info(task) (&(task)->tinfo)
13297+#define task_stack_page(task) ((task)->stack)
13298+#define setup_thread_stack(p, org) do {} while (0)
13299+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13300+
13301+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
13302+extern struct task_struct *alloc_task_struct(void);
13303+extern void free_task_struct(struct task_struct *);
13304+
13305 #endif
13306 #endif /* _ASM_X86_THREAD_INFO_H */
13307diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13308index 61c5874..8a046e9 100644
13309--- a/arch/x86/include/asm/uaccess.h
13310+++ b/arch/x86/include/asm/uaccess.h
13311@@ -8,12 +8,15 @@
13312 #include <linux/thread_info.h>
13313 #include <linux/prefetch.h>
13314 #include <linux/string.h>
13315+#include <linux/sched.h>
13316 #include <asm/asm.h>
13317 #include <asm/page.h>
13318
13319 #define VERIFY_READ 0
13320 #define VERIFY_WRITE 1
13321
13322+extern void check_object_size(const void *ptr, unsigned long n, bool to);
13323+
13324 /*
13325 * The fs value determines whether argument validity checking should be
13326 * performed or not. If get_fs() == USER_DS, checking is performed, with
13327@@ -29,7 +32,12 @@
13328
13329 #define get_ds() (KERNEL_DS)
13330 #define get_fs() (current_thread_info()->addr_limit)
13331+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13332+void __set_fs(mm_segment_t x);
13333+void set_fs(mm_segment_t x);
13334+#else
13335 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13336+#endif
13337
13338 #define segment_eq(a, b) ((a).seg == (b).seg)
13339
13340@@ -77,7 +85,33 @@
13341 * checks that the pointer is in the user space range - after calling
13342 * this function, memory access functions may still return -EFAULT.
13343 */
13344-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13345+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13346+#define access_ok(type, addr, size) \
13347+({ \
13348+ long __size = size; \
13349+ unsigned long __addr = (unsigned long)addr; \
13350+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13351+ unsigned long __end_ao = __addr + __size - 1; \
13352+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
13353+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13354+ while(__addr_ao <= __end_ao) { \
13355+ char __c_ao; \
13356+ __addr_ao += PAGE_SIZE; \
13357+ if (__size > PAGE_SIZE) \
13358+ cond_resched(); \
13359+ if (__get_user(__c_ao, (char __user *)__addr)) \
13360+ break; \
13361+ if (type != VERIFY_WRITE) { \
13362+ __addr = __addr_ao; \
13363+ continue; \
13364+ } \
13365+ if (__put_user(__c_ao, (char __user *)__addr)) \
13366+ break; \
13367+ __addr = __addr_ao; \
13368+ } \
13369+ } \
13370+ __ret_ao; \
13371+})
13372
13373 /*
13374 * The exception table consists of pairs of addresses: the first is the
13375@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
13376 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13377 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13378
13379-
13380+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13381+#define __copyuser_seg "gs;"
13382+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13383+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13384+#else
13385+#define __copyuser_seg
13386+#define __COPYUSER_SET_ES
13387+#define __COPYUSER_RESTORE_ES
13388+#endif
13389
13390 #ifdef CONFIG_X86_32
13391 #define __put_user_asm_u64(x, addr, err, errret) \
13392- asm volatile("1: movl %%eax,0(%2)\n" \
13393- "2: movl %%edx,4(%2)\n" \
13394+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13395+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13396 "3:\n" \
13397 ".section .fixup,\"ax\"\n" \
13398 "4: movl %3,%0\n" \
13399@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
13400 : "A" (x), "r" (addr), "i" (errret), "0" (err))
13401
13402 #define __put_user_asm_ex_u64(x, addr) \
13403- asm volatile("1: movl %%eax,0(%1)\n" \
13404- "2: movl %%edx,4(%1)\n" \
13405+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13406+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13407 "3:\n" \
13408 _ASM_EXTABLE(1b, 2b - 1b) \
13409 _ASM_EXTABLE(2b, 3b - 2b) \
13410@@ -253,7 +295,7 @@ extern void __put_user_8(void);
13411 __typeof__(*(ptr)) __pu_val; \
13412 __chk_user_ptr(ptr); \
13413 might_fault(); \
13414- __pu_val = x; \
13415+ __pu_val = (x); \
13416 switch (sizeof(*(ptr))) { \
13417 case 1: \
13418 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13419@@ -374,7 +416,7 @@ do { \
13420 } while (0)
13421
13422 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13423- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
13424+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13425 "2:\n" \
13426 ".section .fixup,\"ax\"\n" \
13427 "3: mov %3,%0\n" \
13428@@ -382,7 +424,7 @@ do { \
13429 " jmp 2b\n" \
13430 ".previous\n" \
13431 _ASM_EXTABLE(1b, 3b) \
13432- : "=r" (err), ltype(x) \
13433+ : "=r" (err), ltype (x) \
13434 : "m" (__m(addr)), "i" (errret), "0" (err))
13435
13436 #define __get_user_size_ex(x, ptr, size) \
13437@@ -407,7 +449,7 @@ do { \
13438 } while (0)
13439
13440 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13441- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13442+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13443 "2:\n" \
13444 _ASM_EXTABLE(1b, 2b - 1b) \
13445 : ltype(x) : "m" (__m(addr)))
13446@@ -424,13 +466,24 @@ do { \
13447 int __gu_err; \
13448 unsigned long __gu_val; \
13449 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13450- (x) = (__force __typeof__(*(ptr)))__gu_val; \
13451+ (x) = (__typeof__(*(ptr)))__gu_val; \
13452 __gu_err; \
13453 })
13454
13455 /* FIXME: this hack is definitely wrong -AK */
13456 struct __large_struct { unsigned long buf[100]; };
13457-#define __m(x) (*(struct __large_struct __user *)(x))
13458+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13459+#define ____m(x) \
13460+({ \
13461+ unsigned long ____x = (unsigned long)(x); \
13462+ if (____x < PAX_USER_SHADOW_BASE) \
13463+ ____x += PAX_USER_SHADOW_BASE; \
13464+ (void __user *)____x; \
13465+})
13466+#else
13467+#define ____m(x) (x)
13468+#endif
13469+#define __m(x) (*(struct __large_struct __user *)____m(x))
13470
13471 /*
13472 * Tell gcc we read from memory instead of writing: this is because
13473@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
13474 * aliasing issues.
13475 */
13476 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13477- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
13478+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13479 "2:\n" \
13480 ".section .fixup,\"ax\"\n" \
13481 "3: mov %3,%0\n" \
13482@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
13483 ".previous\n" \
13484 _ASM_EXTABLE(1b, 3b) \
13485 : "=r"(err) \
13486- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13487+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13488
13489 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13490- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13491+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13492 "2:\n" \
13493 _ASM_EXTABLE(1b, 2b - 1b) \
13494 : : ltype(x), "m" (__m(addr)))
13495@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
13496 * On error, the variable @x is set to zero.
13497 */
13498
13499+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13500+#define __get_user(x, ptr) get_user((x), (ptr))
13501+#else
13502 #define __get_user(x, ptr) \
13503 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13504+#endif
13505
13506 /**
13507 * __put_user: - Write a simple value into user space, with less checking.
13508@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
13509 * Returns zero on success, or -EFAULT on error.
13510 */
13511
13512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13513+#define __put_user(x, ptr) put_user((x), (ptr))
13514+#else
13515 #define __put_user(x, ptr) \
13516 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13517+#endif
13518
13519 #define __get_user_unaligned __get_user
13520 #define __put_user_unaligned __put_user
13521@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
13522 #define get_user_ex(x, ptr) do { \
13523 unsigned long __gue_val; \
13524 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13525- (x) = (__force __typeof__(*(ptr)))__gue_val; \
13526+ (x) = (__typeof__(*(ptr)))__gue_val; \
13527 } while (0)
13528
13529 #ifdef CONFIG_X86_WP_WORKS_OK
13530@@ -567,6 +628,7 @@ extern struct movsl_mask {
13531
13532 #define ARCH_HAS_NOCACHE_UACCESS 1
13533
13534+#define ARCH_HAS_SORT_EXTABLE
13535 #ifdef CONFIG_X86_32
13536 # include "uaccess_32.h"
13537 #else
13538diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13539index 632fb44..2a195ea 100644
13540--- a/arch/x86/include/asm/uaccess_32.h
13541+++ b/arch/x86/include/asm/uaccess_32.h
13542@@ -12,15 +12,15 @@
13543 #include <asm/page.h>
13544
13545 unsigned long __must_check __copy_to_user_ll
13546- (void __user *to, const void *from, unsigned long n);
13547+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
13548 unsigned long __must_check __copy_from_user_ll
13549- (void *to, const void __user *from, unsigned long n);
13550+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13551 unsigned long __must_check __copy_from_user_ll_nozero
13552- (void *to, const void __user *from, unsigned long n);
13553+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13554 unsigned long __must_check __copy_from_user_ll_nocache
13555- (void *to, const void __user *from, unsigned long n);
13556+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13557 unsigned long __must_check __copy_from_user_ll_nocache_nozero
13558- (void *to, const void __user *from, unsigned long n);
13559+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13560
13561 /**
13562 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
13563@@ -42,8 +42,15 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13564 */
13565
13566 static __always_inline unsigned long __must_check
13567+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13568+static __always_inline unsigned long __must_check
13569 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13570 {
13571+ pax_track_stack();
13572+
13573+ if ((long)n < 0)
13574+ return n;
13575+
13576 if (__builtin_constant_p(n)) {
13577 unsigned long ret;
13578
13579@@ -62,6 +69,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13580 return ret;
13581 }
13582 }
13583+ if (!__builtin_constant_p(n))
13584+ check_object_size(from, n, true);
13585 return __copy_to_user_ll(to, from, n);
13586 }
13587
13588@@ -80,15 +89,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13589 * On success, this will be zero.
13590 */
13591 static __always_inline unsigned long __must_check
13592+__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13593+static __always_inline unsigned long __must_check
13594 __copy_to_user(void __user *to, const void *from, unsigned long n)
13595 {
13596 might_fault();
13597+
13598 return __copy_to_user_inatomic(to, from, n);
13599 }
13600
13601 static __always_inline unsigned long
13602+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13603+static __always_inline unsigned long
13604 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13605 {
13606+ if ((long)n < 0)
13607+ return n;
13608+
13609 /* Avoid zeroing the tail if the copy fails..
13610 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13611 * but as the zeroing behaviour is only significant when n is not
13612@@ -135,9 +152,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13613 * for explanation of why this is needed.
13614 */
13615 static __always_inline unsigned long
13616+__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13617+static __always_inline unsigned long
13618 __copy_from_user(void *to, const void __user *from, unsigned long n)
13619 {
13620 might_fault();
13621+
13622+ pax_track_stack();
13623+
13624+ if ((long)n < 0)
13625+ return n;
13626+
13627 if (__builtin_constant_p(n)) {
13628 unsigned long ret;
13629
13630@@ -153,13 +178,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13631 return ret;
13632 }
13633 }
13634+ if (!__builtin_constant_p(n))
13635+ check_object_size(to, n, false);
13636 return __copy_from_user_ll(to, from, n);
13637 }
13638
13639 static __always_inline unsigned long __copy_from_user_nocache(void *to,
13640+ const void __user *from, unsigned long n) __size_overflow(3);
13641+static __always_inline unsigned long __copy_from_user_nocache(void *to,
13642 const void __user *from, unsigned long n)
13643 {
13644 might_fault();
13645+
13646+ if ((long)n < 0)
13647+ return n;
13648+
13649 if (__builtin_constant_p(n)) {
13650 unsigned long ret;
13651
13652@@ -180,20 +213,75 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13653
13654 static __always_inline unsigned long
13655 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13656+ unsigned long n) __size_overflow(3);
13657+static __always_inline unsigned long
13658+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
13659 unsigned long n)
13660 {
13661- return __copy_from_user_ll_nocache_nozero(to, from, n);
13662+ if ((long)n < 0)
13663+ return n;
13664+
13665+ return __copy_from_user_ll_nocache_nozero(to, from, n);
13666+}
13667+
13668+/**
13669+ * copy_to_user: - Copy a block of data into user space.
13670+ * @to: Destination address, in user space.
13671+ * @from: Source address, in kernel space.
13672+ * @n: Number of bytes to copy.
13673+ *
13674+ * Context: User context only. This function may sleep.
13675+ *
13676+ * Copy data from kernel space to user space.
13677+ *
13678+ * Returns number of bytes that could not be copied.
13679+ * On success, this will be zero.
13680+ */
13681+static __always_inline unsigned long __must_check
13682+copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13683+static __always_inline unsigned long __must_check
13684+copy_to_user(void __user *to, const void *from, unsigned long n)
13685+{
13686+ if (access_ok(VERIFY_WRITE, to, n))
13687+ n = __copy_to_user(to, from, n);
13688+ return n;
13689+}
13690+
13691+/**
13692+ * copy_from_user: - Copy a block of data from user space.
13693+ * @to: Destination address, in kernel space.
13694+ * @from: Source address, in user space.
13695+ * @n: Number of bytes to copy.
13696+ *
13697+ * Context: User context only. This function may sleep.
13698+ *
13699+ * Copy data from user space to kernel space.
13700+ *
13701+ * Returns number of bytes that could not be copied.
13702+ * On success, this will be zero.
13703+ *
13704+ * If some data could not be copied, this function will pad the copied
13705+ * data to the requested size using zero bytes.
13706+ */
13707+static __always_inline unsigned long __must_check
13708+copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13709+static __always_inline unsigned long __must_check
13710+copy_from_user(void *to, const void __user *from, unsigned long n)
13711+{
13712+ if (access_ok(VERIFY_READ, from, n))
13713+ n = __copy_from_user(to, from, n);
13714+ else if ((long)n > 0) {
13715+ if (!__builtin_constant_p(n))
13716+ check_object_size(to, n, false);
13717+ memset(to, 0, n);
13718+ }
13719+ return n;
13720 }
13721
13722-unsigned long __must_check copy_to_user(void __user *to,
13723- const void *from, unsigned long n);
13724-unsigned long __must_check copy_from_user(void *to,
13725- const void __user *from,
13726- unsigned long n);
13727 long __must_check strncpy_from_user(char *dst, const char __user *src,
13728- long count);
13729+ unsigned long count) __size_overflow(3);
13730 long __must_check __strncpy_from_user(char *dst,
13731- const char __user *src, long count);
13732+ const char __user *src, unsigned long count) __size_overflow(3);
13733
13734 /**
13735 * strlen_user: - Get the size of a string in user space.
13736@@ -211,8 +299,8 @@ long __must_check __strncpy_from_user(char *dst,
13737 */
13738 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13739
13740-long strnlen_user(const char __user *str, long n);
13741-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13742-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13743+long strnlen_user(const char __user *str, unsigned long n);
13744+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13745+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13746
13747 #endif /* _ASM_X86_UACCESS_32_H */
13748diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13749index db24b21..d0d2413 100644
13750--- a/arch/x86/include/asm/uaccess_64.h
13751+++ b/arch/x86/include/asm/uaccess_64.h
13752@@ -9,6 +9,9 @@
13753 #include <linux/prefetch.h>
13754 #include <linux/lockdep.h>
13755 #include <asm/page.h>
13756+#include <asm/pgtable.h>
13757+
13758+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13759
13760 /*
13761 * Copy To/From Userspace
13762@@ -16,116 +19,215 @@
13763
13764 /* Handles exceptions in both to and from, but doesn't do access_ok */
13765 __must_check unsigned long
13766-copy_user_generic(void *to, const void *from, unsigned len);
13767+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13768
13769 __must_check unsigned long
13770-copy_to_user(void __user *to, const void *from, unsigned len);
13771-__must_check unsigned long
13772-copy_from_user(void *to, const void __user *from, unsigned len);
13773-__must_check unsigned long
13774-copy_in_user(void __user *to, const void __user *from, unsigned len);
13775+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13776
13777 static __always_inline __must_check
13778-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13779+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13780+static __always_inline __must_check
13781+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13782 {
13783- int ret = 0;
13784+ unsigned ret = 0;
13785
13786 might_fault();
13787- if (!__builtin_constant_p(size))
13788- return copy_user_generic(dst, (__force void *)src, size);
13789+
13790+ if (size > INT_MAX)
13791+ return size;
13792+
13793+#ifdef CONFIG_PAX_MEMORY_UDEREF
13794+ if (!__access_ok(VERIFY_READ, src, size))
13795+ return size;
13796+#endif
13797+
13798+ if (!__builtin_constant_p(size)) {
13799+ check_object_size(dst, size, false);
13800+
13801+#ifdef CONFIG_PAX_MEMORY_UDEREF
13802+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13803+ src += PAX_USER_SHADOW_BASE;
13804+#endif
13805+
13806+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13807+ }
13808 switch (size) {
13809- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13810+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13811 ret, "b", "b", "=q", 1);
13812 return ret;
13813- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13814+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13815 ret, "w", "w", "=r", 2);
13816 return ret;
13817- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13818+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13819 ret, "l", "k", "=r", 4);
13820 return ret;
13821- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13822+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13823 ret, "q", "", "=r", 8);
13824 return ret;
13825 case 10:
13826- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13827+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13828 ret, "q", "", "=r", 10);
13829 if (unlikely(ret))
13830 return ret;
13831 __get_user_asm(*(u16 *)(8 + (char *)dst),
13832- (u16 __user *)(8 + (char __user *)src),
13833+ (const u16 __user *)(8 + (const char __user *)src),
13834 ret, "w", "w", "=r", 2);
13835 return ret;
13836 case 16:
13837- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13838+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13839 ret, "q", "", "=r", 16);
13840 if (unlikely(ret))
13841 return ret;
13842 __get_user_asm(*(u64 *)(8 + (char *)dst),
13843- (u64 __user *)(8 + (char __user *)src),
13844+ (const u64 __user *)(8 + (const char __user *)src),
13845 ret, "q", "", "=r", 8);
13846 return ret;
13847 default:
13848- return copy_user_generic(dst, (__force void *)src, size);
13849+
13850+#ifdef CONFIG_PAX_MEMORY_UDEREF
13851+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13852+ src += PAX_USER_SHADOW_BASE;
13853+#endif
13854+
13855+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13856 }
13857 }
13858
13859 static __always_inline __must_check
13860-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13861+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13862+static __always_inline __must_check
13863+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13864 {
13865- int ret = 0;
13866+ unsigned ret = 0;
13867
13868 might_fault();
13869- if (!__builtin_constant_p(size))
13870- return copy_user_generic((__force void *)dst, src, size);
13871+
13872+ pax_track_stack();
13873+
13874+ if (size > INT_MAX)
13875+ return size;
13876+
13877+#ifdef CONFIG_PAX_MEMORY_UDEREF
13878+ if (!__access_ok(VERIFY_WRITE, dst, size))
13879+ return size;
13880+#endif
13881+
13882+ if (!__builtin_constant_p(size)) {
13883+ check_object_size(src, size, true);
13884+
13885+#ifdef CONFIG_PAX_MEMORY_UDEREF
13886+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13887+ dst += PAX_USER_SHADOW_BASE;
13888+#endif
13889+
13890+ return copy_user_generic((__force_kernel void *)dst, src, size);
13891+ }
13892 switch (size) {
13893- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13894+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13895 ret, "b", "b", "iq", 1);
13896 return ret;
13897- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13898+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13899 ret, "w", "w", "ir", 2);
13900 return ret;
13901- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13902+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13903 ret, "l", "k", "ir", 4);
13904 return ret;
13905- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13906+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13907 ret, "q", "", "er", 8);
13908 return ret;
13909 case 10:
13910- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13911+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13912 ret, "q", "", "er", 10);
13913 if (unlikely(ret))
13914 return ret;
13915 asm("":::"memory");
13916- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13917+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13918 ret, "w", "w", "ir", 2);
13919 return ret;
13920 case 16:
13921- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13922+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13923 ret, "q", "", "er", 16);
13924 if (unlikely(ret))
13925 return ret;
13926 asm("":::"memory");
13927- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13928+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13929 ret, "q", "", "er", 8);
13930 return ret;
13931 default:
13932- return copy_user_generic((__force void *)dst, src, size);
13933+
13934+#ifdef CONFIG_PAX_MEMORY_UDEREF
13935+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13936+ dst += PAX_USER_SHADOW_BASE;
13937+#endif
13938+
13939+ return copy_user_generic((__force_kernel void *)dst, src, size);
13940 }
13941 }
13942
13943 static __always_inline __must_check
13944-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13945+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13946+static __always_inline __must_check
13947+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
13948 {
13949- int ret = 0;
13950+ if (access_ok(VERIFY_WRITE, to, len))
13951+ len = __copy_to_user(to, from, len);
13952+ return len;
13953+}
13954
13955+static __always_inline __must_check
13956+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13957+static __always_inline __must_check
13958+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
13959+{
13960 might_fault();
13961- if (!__builtin_constant_p(size))
13962- return copy_user_generic((__force void *)dst,
13963- (__force void *)src, size);
13964+
13965+ if (access_ok(VERIFY_READ, from, len))
13966+ len = __copy_from_user(to, from, len);
13967+ else if (len < INT_MAX) {
13968+ if (!__builtin_constant_p(len))
13969+ check_object_size(to, len, false);
13970+ memset(to, 0, len);
13971+ }
13972+ return len;
13973+}
13974+
13975+static __always_inline __must_check
13976+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13977+static __always_inline __must_check
13978+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13979+{
13980+ unsigned ret = 0;
13981+
13982+ might_fault();
13983+
13984+ pax_track_stack();
13985+
13986+ if (size > INT_MAX)
13987+ return size;
13988+
13989+#ifdef CONFIG_PAX_MEMORY_UDEREF
13990+ if (!__access_ok(VERIFY_READ, src, size))
13991+ return size;
13992+ if (!__access_ok(VERIFY_WRITE, dst, size))
13993+ return size;
13994+#endif
13995+
13996+ if (!__builtin_constant_p(size)) {
13997+
13998+#ifdef CONFIG_PAX_MEMORY_UDEREF
13999+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14000+ src += PAX_USER_SHADOW_BASE;
14001+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14002+ dst += PAX_USER_SHADOW_BASE;
14003+#endif
14004+
14005+ return copy_user_generic((__force_kernel void *)dst,
14006+ (__force_kernel const void *)src, size);
14007+ }
14008 switch (size) {
14009 case 1: {
14010 u8 tmp;
14011- __get_user_asm(tmp, (u8 __user *)src,
14012+ __get_user_asm(tmp, (const u8 __user *)src,
14013 ret, "b", "b", "=q", 1);
14014 if (likely(!ret))
14015 __put_user_asm(tmp, (u8 __user *)dst,
14016@@ -134,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14017 }
14018 case 2: {
14019 u16 tmp;
14020- __get_user_asm(tmp, (u16 __user *)src,
14021+ __get_user_asm(tmp, (const u16 __user *)src,
14022 ret, "w", "w", "=r", 2);
14023 if (likely(!ret))
14024 __put_user_asm(tmp, (u16 __user *)dst,
14025@@ -144,7 +246,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14026
14027 case 4: {
14028 u32 tmp;
14029- __get_user_asm(tmp, (u32 __user *)src,
14030+ __get_user_asm(tmp, (const u32 __user *)src,
14031 ret, "l", "k", "=r", 4);
14032 if (likely(!ret))
14033 __put_user_asm(tmp, (u32 __user *)dst,
14034@@ -153,7 +255,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14035 }
14036 case 8: {
14037 u64 tmp;
14038- __get_user_asm(tmp, (u64 __user *)src,
14039+ __get_user_asm(tmp, (const u64 __user *)src,
14040 ret, "q", "", "=r", 8);
14041 if (likely(!ret))
14042 __put_user_asm(tmp, (u64 __user *)dst,
14043@@ -161,48 +263,105 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14044 return ret;
14045 }
14046 default:
14047- return copy_user_generic((__force void *)dst,
14048- (__force void *)src, size);
14049+
14050+#ifdef CONFIG_PAX_MEMORY_UDEREF
14051+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14052+ src += PAX_USER_SHADOW_BASE;
14053+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14054+ dst += PAX_USER_SHADOW_BASE;
14055+#endif
14056+
14057+ return copy_user_generic((__force_kernel void *)dst,
14058+ (__force_kernel const void *)src, size);
14059 }
14060 }
14061
14062 __must_check long
14063-strncpy_from_user(char *dst, const char __user *src, long count);
14064+strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
14065 __must_check long
14066-__strncpy_from_user(char *dst, const char __user *src, long count);
14067-__must_check long strnlen_user(const char __user *str, long n);
14068-__must_check long __strnlen_user(const char __user *str, long n);
14069+__strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
14070+__must_check long strnlen_user(const char __user *str, unsigned long n) __size_overflow(2);
14071+__must_check long __strnlen_user(const char __user *str, unsigned long n) __size_overflow(2);
14072 __must_check long strlen_user(const char __user *str);
14073-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
14074-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
14075+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14076+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14077
14078-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
14079- unsigned size);
14080+static __must_check __always_inline unsigned long
14081+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14082+static __must_check __always_inline unsigned long
14083+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14084+{
14085+ pax_track_stack();
14086+
14087+ if (size > INT_MAX)
14088+ return size;
14089+
14090+#ifdef CONFIG_PAX_MEMORY_UDEREF
14091+ if (!__access_ok(VERIFY_READ, src, size))
14092+ return size;
14093
14094-static __must_check __always_inline int
14095-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14096+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14097+ src += PAX_USER_SHADOW_BASE;
14098+#endif
14099+
14100+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
14101+}
14102+
14103+static __must_check __always_inline unsigned long
14104+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
14105+static __must_check __always_inline unsigned long
14106+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14107 {
14108- return copy_user_generic((__force void *)dst, src, size);
14109+ if (size > INT_MAX)
14110+ return size;
14111+
14112+#ifdef CONFIG_PAX_MEMORY_UDEREF
14113+ if (!__access_ok(VERIFY_WRITE, dst, size))
14114+ return size;
14115+
14116+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14117+ dst += PAX_USER_SHADOW_BASE;
14118+#endif
14119+
14120+ return copy_user_generic((__force_kernel void *)dst, src, size);
14121 }
14122
14123-extern long __copy_user_nocache(void *dst, const void __user *src,
14124- unsigned size, int zerorest);
14125+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14126+ unsigned long size, int zerorest) __size_overflow(3);
14127
14128-static inline int
14129-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14130+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14131+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14132 {
14133 might_sleep();
14134+
14135+ if (size > INT_MAX)
14136+ return size;
14137+
14138+#ifdef CONFIG_PAX_MEMORY_UDEREF
14139+ if (!__access_ok(VERIFY_READ, src, size))
14140+ return size;
14141+#endif
14142+
14143 return __copy_user_nocache(dst, src, size, 1);
14144 }
14145
14146-static inline int
14147-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14148- unsigned size)
14149+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14150+ unsigned long size) __size_overflow(3);
14151+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14152+ unsigned long size)
14153 {
14154+ if (size > INT_MAX)
14155+ return size;
14156+
14157+#ifdef CONFIG_PAX_MEMORY_UDEREF
14158+ if (!__access_ok(VERIFY_READ, src, size))
14159+ return size;
14160+#endif
14161+
14162 return __copy_user_nocache(dst, src, size, 0);
14163 }
14164
14165-unsigned long
14166-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14167+extern unsigned long
14168+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14169
14170 #endif /* _ASM_X86_UACCESS_64_H */
14171diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
14172index 9064052..786cfbc 100644
14173--- a/arch/x86/include/asm/vdso.h
14174+++ b/arch/x86/include/asm/vdso.h
14175@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
14176 #define VDSO32_SYMBOL(base, name) \
14177 ({ \
14178 extern const char VDSO32_##name[]; \
14179- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14180+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14181 })
14182 #endif
14183
14184diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
14185index 3d61e20..9507180 100644
14186--- a/arch/x86/include/asm/vgtod.h
14187+++ b/arch/x86/include/asm/vgtod.h
14188@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
14189 int sysctl_enabled;
14190 struct timezone sys_tz;
14191 struct { /* extract of a clocksource struct */
14192+ char name[8];
14193 cycle_t (*vread)(void);
14194 cycle_t cycle_last;
14195 cycle_t mask;
14196diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
14197index 61e08c0..b0da582 100644
14198--- a/arch/x86/include/asm/vmi.h
14199+++ b/arch/x86/include/asm/vmi.h
14200@@ -191,6 +191,7 @@ struct vrom_header {
14201 u8 reserved[96]; /* Reserved for headers */
14202 char vmi_init[8]; /* VMI_Init jump point */
14203 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
14204+ char rom_data[8048]; /* rest of the option ROM */
14205 } __attribute__((packed));
14206
14207 struct pnp_header {
14208diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
14209index c6e0bee..fcb9f74 100644
14210--- a/arch/x86/include/asm/vmi_time.h
14211+++ b/arch/x86/include/asm/vmi_time.h
14212@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
14213 int (*wallclock_updated)(void);
14214 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
14215 void (*cancel_alarm)(u32 flags);
14216-} vmi_timer_ops;
14217+} __no_const vmi_timer_ops;
14218
14219 /* Prototypes */
14220 extern void __init vmi_time_init(void);
14221diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
14222index d0983d2..1f7c9e9 100644
14223--- a/arch/x86/include/asm/vsyscall.h
14224+++ b/arch/x86/include/asm/vsyscall.h
14225@@ -15,9 +15,10 @@ enum vsyscall_num {
14226
14227 #ifdef __KERNEL__
14228 #include <linux/seqlock.h>
14229+#include <linux/getcpu.h>
14230+#include <linux/time.h>
14231
14232 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
14233-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
14234
14235 /* Definitions for CONFIG_GENERIC_TIME definitions */
14236 #define __section_vsyscall_gtod_data __attribute__ \
14237@@ -31,7 +32,6 @@ enum vsyscall_num {
14238 #define VGETCPU_LSL 2
14239
14240 extern int __vgetcpu_mode;
14241-extern volatile unsigned long __jiffies;
14242
14243 /* kernel space (writeable) */
14244 extern int vgetcpu_mode;
14245@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
14246
14247 extern void map_vsyscall(void);
14248
14249+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
14250+extern time_t vtime(time_t *t);
14251+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
14252 #endif /* __KERNEL__ */
14253
14254 #endif /* _ASM_X86_VSYSCALL_H */
14255diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14256index 2c756fd..3377e37 100644
14257--- a/arch/x86/include/asm/x86_init.h
14258+++ b/arch/x86/include/asm/x86_init.h
14259@@ -28,7 +28,7 @@ struct x86_init_mpparse {
14260 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
14261 void (*find_smp_config)(unsigned int reserve);
14262 void (*get_smp_config)(unsigned int early);
14263-};
14264+} __no_const;
14265
14266 /**
14267 * struct x86_init_resources - platform specific resource related ops
14268@@ -42,7 +42,7 @@ struct x86_init_resources {
14269 void (*probe_roms)(void);
14270 void (*reserve_resources)(void);
14271 char *(*memory_setup)(void);
14272-};
14273+} __no_const;
14274
14275 /**
14276 * struct x86_init_irqs - platform specific interrupt setup
14277@@ -55,7 +55,7 @@ struct x86_init_irqs {
14278 void (*pre_vector_init)(void);
14279 void (*intr_init)(void);
14280 void (*trap_init)(void);
14281-};
14282+} __no_const;
14283
14284 /**
14285 * struct x86_init_oem - oem platform specific customizing functions
14286@@ -65,7 +65,7 @@ struct x86_init_irqs {
14287 struct x86_init_oem {
14288 void (*arch_setup)(void);
14289 void (*banner)(void);
14290-};
14291+} __no_const;
14292
14293 /**
14294 * struct x86_init_paging - platform specific paging functions
14295@@ -75,7 +75,7 @@ struct x86_init_oem {
14296 struct x86_init_paging {
14297 void (*pagetable_setup_start)(pgd_t *base);
14298 void (*pagetable_setup_done)(pgd_t *base);
14299-};
14300+} __no_const;
14301
14302 /**
14303 * struct x86_init_timers - platform specific timer setup
14304@@ -88,7 +88,7 @@ struct x86_init_timers {
14305 void (*setup_percpu_clockev)(void);
14306 void (*tsc_pre_init)(void);
14307 void (*timer_init)(void);
14308-};
14309+} __no_const;
14310
14311 /**
14312 * struct x86_init_ops - functions for platform specific setup
14313@@ -101,7 +101,7 @@ struct x86_init_ops {
14314 struct x86_init_oem oem;
14315 struct x86_init_paging paging;
14316 struct x86_init_timers timers;
14317-};
14318+} __no_const;
14319
14320 /**
14321 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14322@@ -109,7 +109,7 @@ struct x86_init_ops {
14323 */
14324 struct x86_cpuinit_ops {
14325 void (*setup_percpu_clockev)(void);
14326-};
14327+} __no_const;
14328
14329 /**
14330 * struct x86_platform_ops - platform specific runtime functions
14331@@ -121,7 +121,7 @@ struct x86_platform_ops {
14332 unsigned long (*calibrate_tsc)(void);
14333 unsigned long (*get_wallclock)(void);
14334 int (*set_wallclock)(unsigned long nowtime);
14335-};
14336+} __no_const;
14337
14338 extern struct x86_init_ops x86_init;
14339 extern struct x86_cpuinit_ops x86_cpuinit;
14340diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14341index 727acc1..554f3eb 100644
14342--- a/arch/x86/include/asm/xsave.h
14343+++ b/arch/x86/include/asm/xsave.h
14344@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
14345 static inline int xsave_user(struct xsave_struct __user *buf)
14346 {
14347 int err;
14348+
14349+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14350+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
14351+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
14352+#endif
14353+
14354 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14355 "2:\n"
14356 ".section .fixup,\"ax\"\n"
14357@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14358 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14359 {
14360 int err;
14361- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14362+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14363 u32 lmask = mask;
14364 u32 hmask = mask >> 32;
14365
14366+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14367+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
14368+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
14369+#endif
14370+
14371 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14372 "2:\n"
14373 ".section .fixup,\"ax\"\n"
14374diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
14375index 6a564ac..9b1340c 100644
14376--- a/arch/x86/kernel/acpi/realmode/Makefile
14377+++ b/arch/x86/kernel/acpi/realmode/Makefile
14378@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
14379 $(call cc-option, -fno-stack-protector) \
14380 $(call cc-option, -mpreferred-stack-boundary=2)
14381 KBUILD_CFLAGS += $(call cc-option, -m32)
14382+ifdef CONSTIFY_PLUGIN
14383+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
14384+endif
14385 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
14386 GCOV_PROFILE := n
14387
14388diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
14389index 580b4e2..d4129e4 100644
14390--- a/arch/x86/kernel/acpi/realmode/wakeup.S
14391+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
14392@@ -91,6 +91,9 @@ _start:
14393 /* Do any other stuff... */
14394
14395 #ifndef CONFIG_64BIT
14396+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
14397+ call verify_cpu
14398+
14399 /* This could also be done in C code... */
14400 movl pmode_cr3, %eax
14401 movl %eax, %cr3
14402@@ -104,7 +107,7 @@ _start:
14403 movl %eax, %ecx
14404 orl %edx, %ecx
14405 jz 1f
14406- movl $0xc0000080, %ecx
14407+ mov $MSR_EFER, %ecx
14408 wrmsr
14409 1:
14410
14411@@ -114,6 +117,7 @@ _start:
14412 movl pmode_cr0, %eax
14413 movl %eax, %cr0
14414 jmp pmode_return
14415+# include "../../verify_cpu.S"
14416 #else
14417 pushw $0
14418 pushw trampoline_segment
14419diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14420index ca93638..7042f24 100644
14421--- a/arch/x86/kernel/acpi/sleep.c
14422+++ b/arch/x86/kernel/acpi/sleep.c
14423@@ -11,11 +11,12 @@
14424 #include <linux/cpumask.h>
14425 #include <asm/segment.h>
14426 #include <asm/desc.h>
14427+#include <asm/e820.h>
14428
14429 #include "realmode/wakeup.h"
14430 #include "sleep.h"
14431
14432-unsigned long acpi_wakeup_address;
14433+unsigned long acpi_wakeup_address = 0x2000;
14434 unsigned long acpi_realmode_flags;
14435
14436 /* address in low memory of the wakeup routine. */
14437@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
14438 #else /* CONFIG_64BIT */
14439 header->trampoline_segment = setup_trampoline() >> 4;
14440 #ifdef CONFIG_SMP
14441- stack_start.sp = temp_stack + sizeof(temp_stack);
14442+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14443+
14444+ pax_open_kernel();
14445 early_gdt_descr.address =
14446 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14447+ pax_close_kernel();
14448+
14449 initial_gs = per_cpu_offset(smp_processor_id());
14450 #endif
14451 initial_code = (unsigned long)wakeup_long64;
14452@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
14453 return;
14454 }
14455
14456- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
14457-
14458- if (!acpi_realmode) {
14459- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
14460- return;
14461- }
14462-
14463- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
14464+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
14465+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
14466 }
14467
14468
14469diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14470index 8ded418..079961e 100644
14471--- a/arch/x86/kernel/acpi/wakeup_32.S
14472+++ b/arch/x86/kernel/acpi/wakeup_32.S
14473@@ -30,13 +30,11 @@ wakeup_pmode_return:
14474 # and restore the stack ... but you need gdt for this to work
14475 movl saved_context_esp, %esp
14476
14477- movl %cs:saved_magic, %eax
14478- cmpl $0x12345678, %eax
14479+ cmpl $0x12345678, saved_magic
14480 jne bogus_magic
14481
14482 # jump to place where we left off
14483- movl saved_eip, %eax
14484- jmp *%eax
14485+ jmp *(saved_eip)
14486
14487 bogus_magic:
14488 jmp bogus_magic
14489diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14490index de7353c..075da5f 100644
14491--- a/arch/x86/kernel/alternative.c
14492+++ b/arch/x86/kernel/alternative.c
14493@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14494
14495 BUG_ON(p->len > MAX_PATCH_LEN);
14496 /* prep the buffer with the original instructions */
14497- memcpy(insnbuf, p->instr, p->len);
14498+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14499 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14500 (unsigned long)p->instr, p->len);
14501
14502@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
14503 if (smp_alt_once)
14504 free_init_pages("SMP alternatives",
14505 (unsigned long)__smp_locks,
14506- (unsigned long)__smp_locks_end);
14507+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14508
14509 restart_nmi();
14510 }
14511@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
14512 * instructions. And on the local CPU you need to be protected again NMI or MCE
14513 * handlers seeing an inconsistent instruction while you patch.
14514 */
14515-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14516+static void *__kprobes text_poke_early(void *addr, const void *opcode,
14517 size_t len)
14518 {
14519 unsigned long flags;
14520 local_irq_save(flags);
14521- memcpy(addr, opcode, len);
14522+
14523+ pax_open_kernel();
14524+ memcpy(ktla_ktva(addr), opcode, len);
14525 sync_core();
14526+ pax_close_kernel();
14527+
14528 local_irq_restore(flags);
14529 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14530 that causes hangs on some VIA CPUs. */
14531@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14532 */
14533 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14534 {
14535- unsigned long flags;
14536- char *vaddr;
14537+ unsigned char *vaddr = ktla_ktva(addr);
14538 struct page *pages[2];
14539- int i;
14540+ size_t i;
14541
14542 if (!core_kernel_text((unsigned long)addr)) {
14543- pages[0] = vmalloc_to_page(addr);
14544- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14545+ pages[0] = vmalloc_to_page(vaddr);
14546+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14547 } else {
14548- pages[0] = virt_to_page(addr);
14549+ pages[0] = virt_to_page(vaddr);
14550 WARN_ON(!PageReserved(pages[0]));
14551- pages[1] = virt_to_page(addr + PAGE_SIZE);
14552+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14553 }
14554 BUG_ON(!pages[0]);
14555- local_irq_save(flags);
14556- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14557- if (pages[1])
14558- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14559- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14560- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14561- clear_fixmap(FIX_TEXT_POKE0);
14562- if (pages[1])
14563- clear_fixmap(FIX_TEXT_POKE1);
14564- local_flush_tlb();
14565- sync_core();
14566- /* Could also do a CLFLUSH here to speed up CPU recovery; but
14567- that causes hangs on some VIA CPUs. */
14568+ text_poke_early(addr, opcode, len);
14569 for (i = 0; i < len; i++)
14570- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14571- local_irq_restore(flags);
14572+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14573 return addr;
14574 }
14575diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
14576index 3a44b75..1601800 100644
14577--- a/arch/x86/kernel/amd_iommu.c
14578+++ b/arch/x86/kernel/amd_iommu.c
14579@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
14580 }
14581 }
14582
14583-static struct dma_map_ops amd_iommu_dma_ops = {
14584+static const struct dma_map_ops amd_iommu_dma_ops = {
14585 .alloc_coherent = alloc_coherent,
14586 .free_coherent = free_coherent,
14587 .map_page = map_page,
14588diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14589index 1d2d670..8e3f477 100644
14590--- a/arch/x86/kernel/apic/apic.c
14591+++ b/arch/x86/kernel/apic/apic.c
14592@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
14593 /*
14594 * Debug level, exported for io_apic.c
14595 */
14596-unsigned int apic_verbosity;
14597+int apic_verbosity;
14598
14599 int pic_mode;
14600
14601@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14602 apic_write(APIC_ESR, 0);
14603 v1 = apic_read(APIC_ESR);
14604 ack_APIC_irq();
14605- atomic_inc(&irq_err_count);
14606+ atomic_inc_unchecked(&irq_err_count);
14607
14608 /*
14609 * Here is what the APIC error bits mean:
14610@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
14611 u16 *bios_cpu_apicid;
14612 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
14613
14614+ pax_track_stack();
14615+
14616 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
14617 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
14618
14619diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14620index 8928d97..f799cea 100644
14621--- a/arch/x86/kernel/apic/io_apic.c
14622+++ b/arch/x86/kernel/apic/io_apic.c
14623@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
14624 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
14625 GFP_ATOMIC);
14626 if (!ioapic_entries)
14627- return 0;
14628+ return NULL;
14629
14630 for (apic = 0; apic < nr_ioapics; apic++) {
14631 ioapic_entries[apic] =
14632@@ -733,7 +733,7 @@ nomem:
14633 kfree(ioapic_entries[apic]);
14634 kfree(ioapic_entries);
14635
14636- return 0;
14637+ return NULL;
14638 }
14639
14640 /*
14641@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14642 }
14643 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14644
14645-void lock_vector_lock(void)
14646+void lock_vector_lock(void) __acquires(vector_lock)
14647 {
14648 /* Used to the online set of cpus does not change
14649 * during assign_irq_vector.
14650@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
14651 spin_lock(&vector_lock);
14652 }
14653
14654-void unlock_vector_lock(void)
14655+void unlock_vector_lock(void) __releases(vector_lock)
14656 {
14657 spin_unlock(&vector_lock);
14658 }
14659@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
14660 ack_APIC_irq();
14661 }
14662
14663-atomic_t irq_mis_count;
14664+atomic_unchecked_t irq_mis_count;
14665
14666 static void ack_apic_level(unsigned int irq)
14667 {
14668@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
14669
14670 /* Tail end of version 0x11 I/O APIC bug workaround */
14671 if (!(v & (1 << (i & 0x1f)))) {
14672- atomic_inc(&irq_mis_count);
14673+ atomic_inc_unchecked(&irq_mis_count);
14674 spin_lock(&ioapic_lock);
14675 __mask_and_edge_IO_APIC_irq(cfg);
14676 __unmask_and_level_IO_APIC_irq(cfg);
14677diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14678index 151ace6..f317474 100644
14679--- a/arch/x86/kernel/apm_32.c
14680+++ b/arch/x86/kernel/apm_32.c
14681@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
14682 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14683 * even though they are called in protected mode.
14684 */
14685-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14686+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14687 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14688
14689 static const char driver_version[] = "1.16ac"; /* no spaces */
14690@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14691 BUG_ON(cpu != 0);
14692 gdt = get_cpu_gdt_table(cpu);
14693 save_desc_40 = gdt[0x40 / 8];
14694+
14695+ pax_open_kernel();
14696 gdt[0x40 / 8] = bad_bios_desc;
14697+ pax_close_kernel();
14698
14699 apm_irq_save(flags);
14700 APM_DO_SAVE_SEGS;
14701@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14702 &call->esi);
14703 APM_DO_RESTORE_SEGS;
14704 apm_irq_restore(flags);
14705+
14706+ pax_open_kernel();
14707 gdt[0x40 / 8] = save_desc_40;
14708+ pax_close_kernel();
14709+
14710 put_cpu();
14711
14712 return call->eax & 0xff;
14713@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14714 BUG_ON(cpu != 0);
14715 gdt = get_cpu_gdt_table(cpu);
14716 save_desc_40 = gdt[0x40 / 8];
14717+
14718+ pax_open_kernel();
14719 gdt[0x40 / 8] = bad_bios_desc;
14720+ pax_close_kernel();
14721
14722 apm_irq_save(flags);
14723 APM_DO_SAVE_SEGS;
14724@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14725 &call->eax);
14726 APM_DO_RESTORE_SEGS;
14727 apm_irq_restore(flags);
14728+
14729+ pax_open_kernel();
14730 gdt[0x40 / 8] = save_desc_40;
14731+ pax_close_kernel();
14732+
14733 put_cpu();
14734 return error;
14735 }
14736@@ -975,7 +989,7 @@ recalc:
14737
14738 static void apm_power_off(void)
14739 {
14740- unsigned char po_bios_call[] = {
14741+ const unsigned char po_bios_call[] = {
14742 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
14743 0x8e, 0xd0, /* movw ax,ss */
14744 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
14745@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
14746 * code to that CPU.
14747 */
14748 gdt = get_cpu_gdt_table(0);
14749+
14750+ pax_open_kernel();
14751 set_desc_base(&gdt[APM_CS >> 3],
14752 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14753 set_desc_base(&gdt[APM_CS_16 >> 3],
14754 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14755 set_desc_base(&gdt[APM_DS >> 3],
14756 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14757+ pax_close_kernel();
14758
14759 proc_create("apm", 0, NULL, &apm_file_ops);
14760
14761diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
14762index dfdbf64..9b2b6ce 100644
14763--- a/arch/x86/kernel/asm-offsets_32.c
14764+++ b/arch/x86/kernel/asm-offsets_32.c
14765@@ -51,7 +51,6 @@ void foo(void)
14766 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
14767 BLANK();
14768
14769- OFFSET(TI_task, thread_info, task);
14770 OFFSET(TI_exec_domain, thread_info, exec_domain);
14771 OFFSET(TI_flags, thread_info, flags);
14772 OFFSET(TI_status, thread_info, status);
14773@@ -60,6 +59,8 @@ void foo(void)
14774 OFFSET(TI_restart_block, thread_info, restart_block);
14775 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
14776 OFFSET(TI_cpu, thread_info, cpu);
14777+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14778+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14779 BLANK();
14780
14781 OFFSET(GDS_size, desc_ptr, size);
14782@@ -99,6 +100,7 @@ void foo(void)
14783
14784 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14785 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14786+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14787 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
14788 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
14789 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
14790@@ -115,6 +117,11 @@ void foo(void)
14791 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
14792 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14793 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14794+
14795+#ifdef CONFIG_PAX_KERNEXEC
14796+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14797+#endif
14798+
14799 #endif
14800
14801 #ifdef CONFIG_XEN
14802diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14803index 4a6aeed..371de20 100644
14804--- a/arch/x86/kernel/asm-offsets_64.c
14805+++ b/arch/x86/kernel/asm-offsets_64.c
14806@@ -44,6 +44,8 @@ int main(void)
14807 ENTRY(addr_limit);
14808 ENTRY(preempt_count);
14809 ENTRY(status);
14810+ ENTRY(lowest_stack);
14811+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14812 #ifdef CONFIG_IA32_EMULATION
14813 ENTRY(sysenter_return);
14814 #endif
14815@@ -63,6 +65,18 @@ int main(void)
14816 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14817 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
14818 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14819+
14820+#ifdef CONFIG_PAX_KERNEXEC
14821+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14822+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14823+#endif
14824+
14825+#ifdef CONFIG_PAX_MEMORY_UDEREF
14826+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14827+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14828+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14829+#endif
14830+
14831 #endif
14832
14833
14834@@ -115,6 +129,7 @@ int main(void)
14835 ENTRY(cr8);
14836 BLANK();
14837 #undef ENTRY
14838+ DEFINE(TSS_size, sizeof(struct tss_struct));
14839 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
14840 BLANK();
14841 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
14842@@ -130,6 +145,7 @@ int main(void)
14843
14844 BLANK();
14845 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14846+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14847 #ifdef CONFIG_XEN
14848 BLANK();
14849 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14850diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14851index ff502cc..dc5133e 100644
14852--- a/arch/x86/kernel/cpu/Makefile
14853+++ b/arch/x86/kernel/cpu/Makefile
14854@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
14855 CFLAGS_REMOVE_common.o = -pg
14856 endif
14857
14858-# Make sure load_percpu_segment has no stackprotector
14859-nostackp := $(call cc-option, -fno-stack-protector)
14860-CFLAGS_common.o := $(nostackp)
14861-
14862 obj-y := intel_cacheinfo.o addon_cpuid_features.o
14863 obj-y += proc.o capflags.o powerflags.o common.o
14864 obj-y += vmware.o hypervisor.o sched.o
14865diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14866index 6e082dc..a0b5f36 100644
14867--- a/arch/x86/kernel/cpu/amd.c
14868+++ b/arch/x86/kernel/cpu/amd.c
14869@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14870 unsigned int size)
14871 {
14872 /* AMD errata T13 (order #21922) */
14873- if ((c->x86 == 6)) {
14874+ if (c->x86 == 6) {
14875 /* Duron Rev A0 */
14876 if (c->x86_model == 3 && c->x86_mask == 0)
14877 size = 64;
14878diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14879index 4e34d10..ba6bc97 100644
14880--- a/arch/x86/kernel/cpu/common.c
14881+++ b/arch/x86/kernel/cpu/common.c
14882@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14883
14884 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14885
14886-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14887-#ifdef CONFIG_X86_64
14888- /*
14889- * We need valid kernel segments for data and code in long mode too
14890- * IRET will check the segment types kkeil 2000/10/28
14891- * Also sysret mandates a special GDT layout
14892- *
14893- * TLS descriptors are currently at a different place compared to i386.
14894- * Hopefully nobody expects them at a fixed place (Wine?)
14895- */
14896- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14897- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14898- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14899- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14900- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14901- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14902-#else
14903- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14904- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14905- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14906- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14907- /*
14908- * Segments used for calling PnP BIOS have byte granularity.
14909- * They code segments and data segments have fixed 64k limits,
14910- * the transfer segment sizes are set at run time.
14911- */
14912- /* 32-bit code */
14913- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14914- /* 16-bit code */
14915- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14916- /* 16-bit data */
14917- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14918- /* 16-bit data */
14919- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14920- /* 16-bit data */
14921- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14922- /*
14923- * The APM segments have byte granularity and their bases
14924- * are set at run time. All have 64k limits.
14925- */
14926- /* 32-bit code */
14927- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14928- /* 16-bit code */
14929- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14930- /* data */
14931- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14932-
14933- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14934- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14935- GDT_STACK_CANARY_INIT
14936-#endif
14937-} };
14938-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14939-
14940 static int __init x86_xsave_setup(char *s)
14941 {
14942 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14943@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
14944 {
14945 struct desc_ptr gdt_descr;
14946
14947- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14948+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14949 gdt_descr.size = GDT_SIZE - 1;
14950 load_gdt(&gdt_descr);
14951 /* Reload the per-cpu base */
14952@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14953 /* Filter out anything that depends on CPUID levels we don't have */
14954 filter_cpuid_features(c, true);
14955
14956+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14957+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14958+#endif
14959+
14960 /* If the model name is still unset, do table lookup. */
14961 if (!c->x86_model_id[0]) {
14962 const char *p;
14963@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
14964 }
14965 __setup("clearcpuid=", setup_disablecpuid);
14966
14967+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14968+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14969+
14970 #ifdef CONFIG_X86_64
14971 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14972
14973@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14974 EXPORT_PER_CPU_SYMBOL(current_task);
14975
14976 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14977- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14978+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14979 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14980
14981 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14982@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14983 {
14984 memset(regs, 0, sizeof(struct pt_regs));
14985 regs->fs = __KERNEL_PERCPU;
14986- regs->gs = __KERNEL_STACK_CANARY;
14987+ savesegment(gs, regs->gs);
14988
14989 return regs;
14990 }
14991@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
14992 int i;
14993
14994 cpu = stack_smp_processor_id();
14995- t = &per_cpu(init_tss, cpu);
14996+ t = init_tss + cpu;
14997 orig_ist = &per_cpu(orig_ist, cpu);
14998
14999 #ifdef CONFIG_NUMA
15000@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
15001 switch_to_new_gdt(cpu);
15002 loadsegment(fs, 0);
15003
15004- load_idt((const struct desc_ptr *)&idt_descr);
15005+ load_idt(&idt_descr);
15006
15007 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15008 syscall_init();
15009@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
15010 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15011 barrier();
15012
15013- check_efer();
15014 if (cpu != 0)
15015 enable_x2apic();
15016
15017@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
15018 {
15019 int cpu = smp_processor_id();
15020 struct task_struct *curr = current;
15021- struct tss_struct *t = &per_cpu(init_tss, cpu);
15022+ struct tss_struct *t = init_tss + cpu;
15023 struct thread_struct *thread = &curr->thread;
15024
15025 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
15026diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15027index 6a77cca..4f4fca0 100644
15028--- a/arch/x86/kernel/cpu/intel.c
15029+++ b/arch/x86/kernel/cpu/intel.c
15030@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15031 * Update the IDT descriptor and reload the IDT so that
15032 * it uses the read-only mapped virtual address.
15033 */
15034- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15035+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15036 load_idt(&idt_descr);
15037 }
15038 #endif
15039diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15040index 417990f..96dc36b 100644
15041--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15042+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15043@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15044 return ret;
15045 }
15046
15047-static struct sysfs_ops sysfs_ops = {
15048+static const struct sysfs_ops sysfs_ops = {
15049 .show = show,
15050 .store = store,
15051 };
15052diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15053index 472763d..aa4d686 100644
15054--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
15055+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15056@@ -178,6 +178,8 @@ static void raise_mce(struct mce *m)
15057
15058 /* Error injection interface */
15059 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15060+ size_t usize, loff_t *off) __size_overflow(3);
15061+static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15062 size_t usize, loff_t *off)
15063 {
15064 struct mce m;
15065@@ -211,7 +213,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15066 static int inject_init(void)
15067 {
15068 printk(KERN_INFO "Machine check injector initialized\n");
15069- mce_chrdev_ops.write = mce_write;
15070+ pax_open_kernel();
15071+ *(void **)&mce_chrdev_ops.write = mce_write;
15072+ pax_close_kernel();
15073 register_die_notifier(&mce_raise_nb);
15074 return 0;
15075 }
15076diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15077index 0f16a2b..21740f5 100644
15078--- a/arch/x86/kernel/cpu/mcheck/mce.c
15079+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15080@@ -43,6 +43,7 @@
15081 #include <asm/ipi.h>
15082 #include <asm/mce.h>
15083 #include <asm/msr.h>
15084+#include <asm/local.h>
15085
15086 #include "mce-internal.h"
15087
15088@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
15089 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15090 m->cs, m->ip);
15091
15092- if (m->cs == __KERNEL_CS)
15093+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15094 print_symbol("{%s}", m->ip);
15095 pr_cont("\n");
15096 }
15097@@ -221,10 +222,10 @@ static void print_mce_tail(void)
15098
15099 #define PANIC_TIMEOUT 5 /* 5 seconds */
15100
15101-static atomic_t mce_paniced;
15102+static atomic_unchecked_t mce_paniced;
15103
15104 static int fake_panic;
15105-static atomic_t mce_fake_paniced;
15106+static atomic_unchecked_t mce_fake_paniced;
15107
15108 /* Panic in progress. Enable interrupts and wait for final IPI */
15109 static void wait_for_panic(void)
15110@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15111 /*
15112 * Make sure only one CPU runs in machine check panic
15113 */
15114- if (atomic_inc_return(&mce_paniced) > 1)
15115+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15116 wait_for_panic();
15117 barrier();
15118
15119@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15120 console_verbose();
15121 } else {
15122 /* Don't log too much for fake panic */
15123- if (atomic_inc_return(&mce_fake_paniced) > 1)
15124+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15125 return;
15126 }
15127 print_mce_head();
15128@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
15129 * might have been modified by someone else.
15130 */
15131 rmb();
15132- if (atomic_read(&mce_paniced))
15133+ if (atomic_read_unchecked(&mce_paniced))
15134 wait_for_panic();
15135 if (!monarch_timeout)
15136 goto out;
15137@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15138 }
15139
15140 /* Call the installed machine check handler for this CPU setup. */
15141-void (*machine_check_vector)(struct pt_regs *, long error_code) =
15142+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15143 unexpected_machine_check;
15144
15145 /*
15146@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15147 return;
15148 }
15149
15150+ pax_open_kernel();
15151 machine_check_vector = do_machine_check;
15152+ pax_close_kernel();
15153
15154 mce_init();
15155 mce_cpu_features(c);
15156@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15157 */
15158
15159 static DEFINE_SPINLOCK(mce_state_lock);
15160-static int open_count; /* #times opened */
15161+static local_t open_count; /* #times opened */
15162 static int open_exclu; /* already open exclusive? */
15163
15164 static int mce_open(struct inode *inode, struct file *file)
15165 {
15166 spin_lock(&mce_state_lock);
15167
15168- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
15169+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
15170 spin_unlock(&mce_state_lock);
15171
15172 return -EBUSY;
15173@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
15174
15175 if (file->f_flags & O_EXCL)
15176 open_exclu = 1;
15177- open_count++;
15178+ local_inc(&open_count);
15179
15180 spin_unlock(&mce_state_lock);
15181
15182@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
15183 {
15184 spin_lock(&mce_state_lock);
15185
15186- open_count--;
15187+ local_dec(&open_count);
15188 open_exclu = 0;
15189
15190 spin_unlock(&mce_state_lock);
15191@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
15192 static void mce_reset(void)
15193 {
15194 cpu_missing = 0;
15195- atomic_set(&mce_fake_paniced, 0);
15196+ atomic_set_unchecked(&mce_fake_paniced, 0);
15197 atomic_set(&mce_executing, 0);
15198 atomic_set(&mce_callin, 0);
15199 atomic_set(&global_nwo, 0);
15200diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15201index ef3cd31..9d2f6ab 100644
15202--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
15203+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15204@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15205 return ret;
15206 }
15207
15208-static struct sysfs_ops threshold_ops = {
15209+static const struct sysfs_ops threshold_ops = {
15210 .show = show,
15211 .store = store,
15212 };
15213diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15214index 5c0e653..0882b0a 100644
15215--- a/arch/x86/kernel/cpu/mcheck/p5.c
15216+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15217@@ -12,6 +12,7 @@
15218 #include <asm/system.h>
15219 #include <asm/mce.h>
15220 #include <asm/msr.h>
15221+#include <asm/pgtable.h>
15222
15223 /* By default disabled */
15224 int mce_p5_enabled __read_mostly;
15225@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15226 if (!cpu_has(c, X86_FEATURE_MCE))
15227 return;
15228
15229+ pax_open_kernel();
15230 machine_check_vector = pentium_machine_check;
15231+ pax_close_kernel();
15232 /* Make sure the vector pointer is visible before we enable MCEs: */
15233 wmb();
15234
15235diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15236index 54060f5..c1a7577 100644
15237--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15238+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15239@@ -11,6 +11,7 @@
15240 #include <asm/system.h>
15241 #include <asm/mce.h>
15242 #include <asm/msr.h>
15243+#include <asm/pgtable.h>
15244
15245 /* Machine check handler for WinChip C6: */
15246 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15247@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15248 {
15249 u32 lo, hi;
15250
15251+ pax_open_kernel();
15252 machine_check_vector = winchip_machine_check;
15253+ pax_close_kernel();
15254 /* Make sure the vector pointer is visible before we enable MCEs: */
15255 wmb();
15256
15257diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
15258index 33af141..92ba9cd 100644
15259--- a/arch/x86/kernel/cpu/mtrr/amd.c
15260+++ b/arch/x86/kernel/cpu/mtrr/amd.c
15261@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
15262 return 0;
15263 }
15264
15265-static struct mtrr_ops amd_mtrr_ops = {
15266+static const struct mtrr_ops amd_mtrr_ops = {
15267 .vendor = X86_VENDOR_AMD,
15268 .set = amd_set_mtrr,
15269 .get = amd_get_mtrr,
15270diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
15271index de89f14..316fe3e 100644
15272--- a/arch/x86/kernel/cpu/mtrr/centaur.c
15273+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
15274@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
15275 return 0;
15276 }
15277
15278-static struct mtrr_ops centaur_mtrr_ops = {
15279+static const struct mtrr_ops centaur_mtrr_ops = {
15280 .vendor = X86_VENDOR_CENTAUR,
15281 .set = centaur_set_mcr,
15282 .get = centaur_get_mcr,
15283diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
15284index 228d982..68a3343 100644
15285--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
15286+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
15287@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
15288 post_set();
15289 }
15290
15291-static struct mtrr_ops cyrix_mtrr_ops = {
15292+static const struct mtrr_ops cyrix_mtrr_ops = {
15293 .vendor = X86_VENDOR_CYRIX,
15294 .set_all = cyrix_set_all,
15295 .set = cyrix_set_arr,
15296diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
15297index 55da0c5..4d75584 100644
15298--- a/arch/x86/kernel/cpu/mtrr/generic.c
15299+++ b/arch/x86/kernel/cpu/mtrr/generic.c
15300@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
15301 /*
15302 * Generic structure...
15303 */
15304-struct mtrr_ops generic_mtrr_ops = {
15305+const struct mtrr_ops generic_mtrr_ops = {
15306 .use_intel_if = 1,
15307 .set_all = generic_set_all,
15308 .get = generic_get_mtrr,
15309diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
15310index 3c1b12d..454f6b6 100644
15311--- a/arch/x86/kernel/cpu/mtrr/if.c
15312+++ b/arch/x86/kernel/cpu/mtrr/if.c
15313@@ -89,6 +89,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
15314 * "base=%Lx size=%Lx type=%s" or "disable=%d"
15315 */
15316 static ssize_t
15317+mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
15318+static ssize_t
15319 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
15320 {
15321 int i, err;
15322diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15323index fd60f09..c94ef52 100644
15324--- a/arch/x86/kernel/cpu/mtrr/main.c
15325+++ b/arch/x86/kernel/cpu/mtrr/main.c
15326@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
15327 u64 size_or_mask, size_and_mask;
15328 static bool mtrr_aps_delayed_init;
15329
15330-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15331+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15332
15333-struct mtrr_ops *mtrr_if;
15334+const struct mtrr_ops *mtrr_if;
15335
15336 static void set_mtrr(unsigned int reg, unsigned long base,
15337 unsigned long size, mtrr_type type);
15338
15339-void set_mtrr_ops(struct mtrr_ops *ops)
15340+void set_mtrr_ops(const struct mtrr_ops *ops)
15341 {
15342 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
15343 mtrr_ops[ops->vendor] = ops;
15344diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15345index a501dee..816c719 100644
15346--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15347+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15348@@ -25,14 +25,14 @@ struct mtrr_ops {
15349 int (*validate_add_page)(unsigned long base, unsigned long size,
15350 unsigned int type);
15351 int (*have_wrcomb)(void);
15352-};
15353+} __do_const;
15354
15355 extern int generic_get_free_region(unsigned long base, unsigned long size,
15356 int replace_reg);
15357 extern int generic_validate_add_page(unsigned long base, unsigned long size,
15358 unsigned int type);
15359
15360-extern struct mtrr_ops generic_mtrr_ops;
15361+extern const struct mtrr_ops generic_mtrr_ops;
15362
15363 extern int positive_have_wrcomb(void);
15364
15365@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
15366 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
15367 void get_mtrr_state(void);
15368
15369-extern void set_mtrr_ops(struct mtrr_ops *ops);
15370+extern void set_mtrr_ops(const struct mtrr_ops *ops);
15371
15372 extern u64 size_or_mask, size_and_mask;
15373-extern struct mtrr_ops *mtrr_if;
15374+extern const struct mtrr_ops *mtrr_if;
15375
15376 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
15377 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
15378diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15379index 0ff02ca..fc49a60 100644
15380--- a/arch/x86/kernel/cpu/perf_event.c
15381+++ b/arch/x86/kernel/cpu/perf_event.c
15382@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
15383 * count to the generic event atomically:
15384 */
15385 again:
15386- prev_raw_count = atomic64_read(&hwc->prev_count);
15387+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
15388 rdmsrl(hwc->event_base + idx, new_raw_count);
15389
15390- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
15391+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
15392 new_raw_count) != prev_raw_count)
15393 goto again;
15394
15395@@ -741,7 +741,7 @@ again:
15396 delta = (new_raw_count << shift) - (prev_raw_count << shift);
15397 delta >>= shift;
15398
15399- atomic64_add(delta, &event->count);
15400+ atomic64_add_unchecked(delta, &event->count);
15401 atomic64_sub(delta, &hwc->period_left);
15402
15403 return new_raw_count;
15404@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
15405 * The hw event starts counting from this event offset,
15406 * mark it to be able to extra future deltas:
15407 */
15408- atomic64_set(&hwc->prev_count, (u64)-left);
15409+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
15410
15411 err = checking_wrmsrl(hwc->event_base + idx,
15412 (u64)(-left) & x86_pmu.event_mask);
15413@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
15414 break;
15415
15416 callchain_store(entry, frame.return_address);
15417- fp = frame.next_frame;
15418+ fp = (__force const void __user *)frame.next_frame;
15419 }
15420 }
15421
15422diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
15423index 898df97..9e82503 100644
15424--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
15425+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
15426@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
15427
15428 /* Interface defining a CPU specific perfctr watchdog */
15429 struct wd_ops {
15430- int (*reserve)(void);
15431- void (*unreserve)(void);
15432- int (*setup)(unsigned nmi_hz);
15433- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15434- void (*stop)(void);
15435+ int (* const reserve)(void);
15436+ void (* const unreserve)(void);
15437+ int (* const setup)(unsigned nmi_hz);
15438+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15439+ void (* const stop)(void);
15440 unsigned perfctr;
15441 unsigned evntsel;
15442 u64 checkbit;
15443@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
15444 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
15445 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
15446
15447+/* cannot be const */
15448 static struct wd_ops intel_arch_wd_ops;
15449
15450 static int setup_intel_arch_watchdog(unsigned nmi_hz)
15451@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
15452 return 1;
15453 }
15454
15455+/* cannot be const */
15456 static struct wd_ops intel_arch_wd_ops __read_mostly = {
15457 .reserve = single_msr_reserve,
15458 .unreserve = single_msr_unreserve,
15459diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15460index ff95824..2ffdcb5 100644
15461--- a/arch/x86/kernel/crash.c
15462+++ b/arch/x86/kernel/crash.c
15463@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
15464 regs = args->regs;
15465
15466 #ifdef CONFIG_X86_32
15467- if (!user_mode_vm(regs)) {
15468+ if (!user_mode(regs)) {
15469 crash_fixup_ss_esp(&fixed_regs, regs);
15470 regs = &fixed_regs;
15471 }
15472diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15473index 37250fe..bf2ec74 100644
15474--- a/arch/x86/kernel/doublefault_32.c
15475+++ b/arch/x86/kernel/doublefault_32.c
15476@@ -11,7 +11,7 @@
15477
15478 #define DOUBLEFAULT_STACKSIZE (1024)
15479 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15480-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15481+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15482
15483 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15484
15485@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15486 unsigned long gdt, tss;
15487
15488 store_gdt(&gdt_desc);
15489- gdt = gdt_desc.address;
15490+ gdt = (unsigned long)gdt_desc.address;
15491
15492 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15493
15494@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15495 /* 0x2 bit is always set */
15496 .flags = X86_EFLAGS_SF | 0x2,
15497 .sp = STACK_START,
15498- .es = __USER_DS,
15499+ .es = __KERNEL_DS,
15500 .cs = __KERNEL_CS,
15501 .ss = __KERNEL_DS,
15502- .ds = __USER_DS,
15503+ .ds = __KERNEL_DS,
15504 .fs = __KERNEL_PERCPU,
15505
15506 .__cr3 = __pa_nodebug(swapper_pg_dir),
15507diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15508index 2d8a371..4fa6ae6 100644
15509--- a/arch/x86/kernel/dumpstack.c
15510+++ b/arch/x86/kernel/dumpstack.c
15511@@ -2,6 +2,9 @@
15512 * Copyright (C) 1991, 1992 Linus Torvalds
15513 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15514 */
15515+#ifdef CONFIG_GRKERNSEC_HIDESYM
15516+#define __INCLUDED_BY_HIDESYM 1
15517+#endif
15518 #include <linux/kallsyms.h>
15519 #include <linux/kprobes.h>
15520 #include <linux/uaccess.h>
15521@@ -28,7 +31,7 @@ static int die_counter;
15522
15523 void printk_address(unsigned long address, int reliable)
15524 {
15525- printk(" [<%p>] %s%pS\n", (void *) address,
15526+ printk(" [<%p>] %s%pA\n", (void *) address,
15527 reliable ? "" : "? ", (void *) address);
15528 }
15529
15530@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
15531 static void
15532 print_ftrace_graph_addr(unsigned long addr, void *data,
15533 const struct stacktrace_ops *ops,
15534- struct thread_info *tinfo, int *graph)
15535+ struct task_struct *task, int *graph)
15536 {
15537- struct task_struct *task = tinfo->task;
15538 unsigned long ret_addr;
15539 int index = task->curr_ret_stack;
15540
15541@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15542 static inline void
15543 print_ftrace_graph_addr(unsigned long addr, void *data,
15544 const struct stacktrace_ops *ops,
15545- struct thread_info *tinfo, int *graph)
15546+ struct task_struct *task, int *graph)
15547 { }
15548 #endif
15549
15550@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15551 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15552 */
15553
15554-static inline int valid_stack_ptr(struct thread_info *tinfo,
15555- void *p, unsigned int size, void *end)
15556+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15557 {
15558- void *t = tinfo;
15559 if (end) {
15560 if (p < end && p >= (end-THREAD_SIZE))
15561 return 1;
15562@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15563 }
15564
15565 unsigned long
15566-print_context_stack(struct thread_info *tinfo,
15567+print_context_stack(struct task_struct *task, void *stack_start,
15568 unsigned long *stack, unsigned long bp,
15569 const struct stacktrace_ops *ops, void *data,
15570 unsigned long *end, int *graph)
15571 {
15572 struct stack_frame *frame = (struct stack_frame *)bp;
15573
15574- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15575+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15576 unsigned long addr;
15577
15578 addr = *stack;
15579@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
15580 } else {
15581 ops->address(data, addr, 0);
15582 }
15583- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15584+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15585 }
15586 stack++;
15587 }
15588@@ -180,7 +180,7 @@ void dump_stack(void)
15589 #endif
15590
15591 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15592- current->pid, current->comm, print_tainted(),
15593+ task_pid_nr(current), current->comm, print_tainted(),
15594 init_utsname()->release,
15595 (int)strcspn(init_utsname()->version, " "),
15596 init_utsname()->version);
15597@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
15598 return flags;
15599 }
15600
15601+extern void gr_handle_kernel_exploit(void);
15602+
15603 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15604 {
15605 if (regs && kexec_should_crash(current))
15606@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15607 panic("Fatal exception in interrupt");
15608 if (panic_on_oops)
15609 panic("Fatal exception");
15610- do_exit(signr);
15611+
15612+ gr_handle_kernel_exploit();
15613+
15614+ do_group_exit(signr);
15615 }
15616
15617 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15618@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15619 unsigned long flags = oops_begin();
15620 int sig = SIGSEGV;
15621
15622- if (!user_mode_vm(regs))
15623+ if (!user_mode(regs))
15624 report_bug(regs->ip, regs);
15625
15626 if (__die(str, regs, err))
15627diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
15628index 81086c2..13e8b17 100644
15629--- a/arch/x86/kernel/dumpstack.h
15630+++ b/arch/x86/kernel/dumpstack.h
15631@@ -15,7 +15,7 @@
15632 #endif
15633
15634 extern unsigned long
15635-print_context_stack(struct thread_info *tinfo,
15636+print_context_stack(struct task_struct *task, void *stack_start,
15637 unsigned long *stack, unsigned long bp,
15638 const struct stacktrace_ops *ops, void *data,
15639 unsigned long *end, int *graph);
15640diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15641index f7dd2a7..504f53b 100644
15642--- a/arch/x86/kernel/dumpstack_32.c
15643+++ b/arch/x86/kernel/dumpstack_32.c
15644@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15645 #endif
15646
15647 for (;;) {
15648- struct thread_info *context;
15649+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15650+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15651
15652- context = (struct thread_info *)
15653- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15654- bp = print_context_stack(context, stack, bp, ops,
15655- data, NULL, &graph);
15656-
15657- stack = (unsigned long *)context->previous_esp;
15658- if (!stack)
15659+ if (stack_start == task_stack_page(task))
15660 break;
15661+ stack = *(unsigned long **)stack_start;
15662 if (ops->stack(data, "IRQ") < 0)
15663 break;
15664 touch_nmi_watchdog();
15665@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
15666 * When in-kernel, we also print out the stack and code at the
15667 * time of the fault..
15668 */
15669- if (!user_mode_vm(regs)) {
15670+ if (!user_mode(regs)) {
15671 unsigned int code_prologue = code_bytes * 43 / 64;
15672 unsigned int code_len = code_bytes;
15673 unsigned char c;
15674 u8 *ip;
15675+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
15676
15677 printk(KERN_EMERG "Stack:\n");
15678 show_stack_log_lvl(NULL, regs, &regs->sp,
15679@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
15680
15681 printk(KERN_EMERG "Code: ");
15682
15683- ip = (u8 *)regs->ip - code_prologue;
15684+ ip = (u8 *)regs->ip - code_prologue + cs_base;
15685 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15686 /* try starting at IP */
15687- ip = (u8 *)regs->ip;
15688+ ip = (u8 *)regs->ip + cs_base;
15689 code_len = code_len - code_prologue + 1;
15690 }
15691 for (i = 0; i < code_len; i++, ip++) {
15692@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
15693 printk(" Bad EIP value.");
15694 break;
15695 }
15696- if (ip == (u8 *)regs->ip)
15697+ if (ip == (u8 *)regs->ip + cs_base)
15698 printk("<%02x> ", c);
15699 else
15700 printk("%02x ", c);
15701@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
15702 printk("\n");
15703 }
15704
15705+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15706+void pax_check_alloca(unsigned long size)
15707+{
15708+ unsigned long sp = (unsigned long)&sp, stack_left;
15709+
15710+ /* all kernel stacks are of the same size */
15711+ stack_left = sp & (THREAD_SIZE - 1);
15712+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15713+}
15714+EXPORT_SYMBOL(pax_check_alloca);
15715+#endif
15716+
15717 int is_valid_bugaddr(unsigned long ip)
15718 {
15719 unsigned short ud2;
15720
15721+ ip = ktla_ktva(ip);
15722 if (ip < PAGE_OFFSET)
15723 return 0;
15724 if (probe_kernel_address((unsigned short *)ip, ud2))
15725diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15726index a071e6b..36cd585 100644
15727--- a/arch/x86/kernel/dumpstack_64.c
15728+++ b/arch/x86/kernel/dumpstack_64.c
15729@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15730 unsigned long *irq_stack_end =
15731 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15732 unsigned used = 0;
15733- struct thread_info *tinfo;
15734 int graph = 0;
15735+ void *stack_start;
15736
15737 if (!task)
15738 task = current;
15739@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15740 * current stack address. If the stacks consist of nested
15741 * exceptions
15742 */
15743- tinfo = task_thread_info(task);
15744 for (;;) {
15745 char *id;
15746 unsigned long *estack_end;
15747+
15748 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15749 &used, &id);
15750
15751@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15752 if (ops->stack(data, id) < 0)
15753 break;
15754
15755- bp = print_context_stack(tinfo, stack, bp, ops,
15756+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15757 data, estack_end, &graph);
15758 ops->stack(data, "<EOE>");
15759 /*
15760@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15761 if (stack >= irq_stack && stack < irq_stack_end) {
15762 if (ops->stack(data, "IRQ") < 0)
15763 break;
15764- bp = print_context_stack(tinfo, stack, bp,
15765+ bp = print_context_stack(task, irq_stack, stack, bp,
15766 ops, data, irq_stack_end, &graph);
15767 /*
15768 * We link to the next stack (which would be
15769@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15770 /*
15771 * This handles the process stack:
15772 */
15773- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15774+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15775+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15776 put_cpu();
15777 }
15778 EXPORT_SYMBOL(dump_trace);
15779@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
15780 return ud2 == 0x0b0f;
15781 }
15782
15783+
15784+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15785+void pax_check_alloca(unsigned long size)
15786+{
15787+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15788+ unsigned cpu, used;
15789+ char *id;
15790+
15791+ /* check the process stack first */
15792+ stack_start = (unsigned long)task_stack_page(current);
15793+ stack_end = stack_start + THREAD_SIZE;
15794+ if (likely(stack_start <= sp && sp < stack_end)) {
15795+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
15796+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15797+ return;
15798+ }
15799+
15800+ cpu = get_cpu();
15801+
15802+ /* check the irq stacks */
15803+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15804+ stack_start = stack_end - IRQ_STACK_SIZE;
15805+ if (stack_start <= sp && sp < stack_end) {
15806+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15807+ put_cpu();
15808+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15809+ return;
15810+ }
15811+
15812+ /* check the exception stacks */
15813+ used = 0;
15814+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15815+ stack_start = stack_end - EXCEPTION_STKSZ;
15816+ if (stack_end && stack_start <= sp && sp < stack_end) {
15817+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15818+ put_cpu();
15819+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15820+ return;
15821+ }
15822+
15823+ put_cpu();
15824+
15825+ /* unknown stack */
15826+ BUG();
15827+}
15828+EXPORT_SYMBOL(pax_check_alloca);
15829+#endif
15830diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
15831index a89739a..95e0c48 100644
15832--- a/arch/x86/kernel/e820.c
15833+++ b/arch/x86/kernel/e820.c
15834@@ -733,7 +733,7 @@ struct early_res {
15835 };
15836 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
15837 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
15838- {}
15839+ { 0, 0, {0}, 0 }
15840 };
15841
15842 static int __init find_overlapped_early(u64 start, u64 end)
15843diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15844index b9c830c..1e41a96 100644
15845--- a/arch/x86/kernel/early_printk.c
15846+++ b/arch/x86/kernel/early_printk.c
15847@@ -7,6 +7,7 @@
15848 #include <linux/pci_regs.h>
15849 #include <linux/pci_ids.h>
15850 #include <linux/errno.h>
15851+#include <linux/sched.h>
15852 #include <asm/io.h>
15853 #include <asm/processor.h>
15854 #include <asm/fcntl.h>
15855@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
15856 int n;
15857 va_list ap;
15858
15859+ pax_track_stack();
15860+
15861 va_start(ap, fmt);
15862 n = vscnprintf(buf, sizeof(buf), fmt, ap);
15863 early_console->write(early_console, buf, n);
15864diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
15865index 5cab48e..b025f9b 100644
15866--- a/arch/x86/kernel/efi_32.c
15867+++ b/arch/x86/kernel/efi_32.c
15868@@ -38,70 +38,56 @@
15869 */
15870
15871 static unsigned long efi_rt_eflags;
15872-static pgd_t efi_bak_pg_dir_pointer[2];
15873+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
15874
15875-void efi_call_phys_prelog(void)
15876+void __init efi_call_phys_prelog(void)
15877 {
15878- unsigned long cr4;
15879- unsigned long temp;
15880 struct desc_ptr gdt_descr;
15881
15882+#ifdef CONFIG_PAX_KERNEXEC
15883+ struct desc_struct d;
15884+#endif
15885+
15886 local_irq_save(efi_rt_eflags);
15887
15888- /*
15889- * If I don't have PAE, I should just duplicate two entries in page
15890- * directory. If I have PAE, I just need to duplicate one entry in
15891- * page directory.
15892- */
15893- cr4 = read_cr4_safe();
15894-
15895- if (cr4 & X86_CR4_PAE) {
15896- efi_bak_pg_dir_pointer[0].pgd =
15897- swapper_pg_dir[pgd_index(0)].pgd;
15898- swapper_pg_dir[0].pgd =
15899- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15900- } else {
15901- efi_bak_pg_dir_pointer[0].pgd =
15902- swapper_pg_dir[pgd_index(0)].pgd;
15903- efi_bak_pg_dir_pointer[1].pgd =
15904- swapper_pg_dir[pgd_index(0x400000)].pgd;
15905- swapper_pg_dir[pgd_index(0)].pgd =
15906- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15907- temp = PAGE_OFFSET + 0x400000;
15908- swapper_pg_dir[pgd_index(0x400000)].pgd =
15909- swapper_pg_dir[pgd_index(temp)].pgd;
15910- }
15911+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
15912+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15913+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15914
15915 /*
15916 * After the lock is released, the original page table is restored.
15917 */
15918 __flush_tlb_all();
15919
15920+#ifdef CONFIG_PAX_KERNEXEC
15921+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
15922+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15923+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
15924+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15925+#endif
15926+
15927 gdt_descr.address = __pa(get_cpu_gdt_table(0));
15928 gdt_descr.size = GDT_SIZE - 1;
15929 load_gdt(&gdt_descr);
15930 }
15931
15932-void efi_call_phys_epilog(void)
15933+void __init efi_call_phys_epilog(void)
15934 {
15935- unsigned long cr4;
15936 struct desc_ptr gdt_descr;
15937
15938+#ifdef CONFIG_PAX_KERNEXEC
15939+ struct desc_struct d;
15940+
15941+ memset(&d, 0, sizeof d);
15942+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15943+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15944+#endif
15945+
15946 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
15947 gdt_descr.size = GDT_SIZE - 1;
15948 load_gdt(&gdt_descr);
15949
15950- cr4 = read_cr4_safe();
15951-
15952- if (cr4 & X86_CR4_PAE) {
15953- swapper_pg_dir[pgd_index(0)].pgd =
15954- efi_bak_pg_dir_pointer[0].pgd;
15955- } else {
15956- swapper_pg_dir[pgd_index(0)].pgd =
15957- efi_bak_pg_dir_pointer[0].pgd;
15958- swapper_pg_dir[pgd_index(0x400000)].pgd =
15959- efi_bak_pg_dir_pointer[1].pgd;
15960- }
15961+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
15962
15963 /*
15964 * After the lock is released, the original page table is restored.
15965diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
15966index fbe66e6..c5c0dd2 100644
15967--- a/arch/x86/kernel/efi_stub_32.S
15968+++ b/arch/x86/kernel/efi_stub_32.S
15969@@ -6,7 +6,9 @@
15970 */
15971
15972 #include <linux/linkage.h>
15973+#include <linux/init.h>
15974 #include <asm/page_types.h>
15975+#include <asm/segment.h>
15976
15977 /*
15978 * efi_call_phys(void *, ...) is a function with variable parameters.
15979@@ -20,7 +22,7 @@
15980 * service functions will comply with gcc calling convention, too.
15981 */
15982
15983-.text
15984+__INIT
15985 ENTRY(efi_call_phys)
15986 /*
15987 * 0. The function can only be called in Linux kernel. So CS has been
15988@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
15989 * The mapping of lower virtual memory has been created in prelog and
15990 * epilog.
15991 */
15992- movl $1f, %edx
15993- subl $__PAGE_OFFSET, %edx
15994- jmp *%edx
15995+ movl $(__KERNEXEC_EFI_DS), %edx
15996+ mov %edx, %ds
15997+ mov %edx, %es
15998+ mov %edx, %ss
15999+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
16000 1:
16001
16002 /*
16003@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
16004 * parameter 2, ..., param n. To make things easy, we save the return
16005 * address of efi_call_phys in a global variable.
16006 */
16007- popl %edx
16008- movl %edx, saved_return_addr
16009- /* get the function pointer into ECX*/
16010- popl %ecx
16011- movl %ecx, efi_rt_function_ptr
16012- movl $2f, %edx
16013- subl $__PAGE_OFFSET, %edx
16014- pushl %edx
16015+ popl (saved_return_addr)
16016+ popl (efi_rt_function_ptr)
16017
16018 /*
16019 * 3. Clear PG bit in %CR0.
16020@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
16021 /*
16022 * 5. Call the physical function.
16023 */
16024- jmp *%ecx
16025+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
16026
16027-2:
16028 /*
16029 * 6. After EFI runtime service returns, control will return to
16030 * following instruction. We'd better readjust stack pointer first.
16031@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
16032 movl %cr0, %edx
16033 orl $0x80000000, %edx
16034 movl %edx, %cr0
16035- jmp 1f
16036-1:
16037+
16038 /*
16039 * 8. Now restore the virtual mode from flat mode by
16040 * adding EIP with PAGE_OFFSET.
16041 */
16042- movl $1f, %edx
16043- jmp *%edx
16044+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
16045 1:
16046+ movl $(__KERNEL_DS), %edx
16047+ mov %edx, %ds
16048+ mov %edx, %es
16049+ mov %edx, %ss
16050
16051 /*
16052 * 9. Balance the stack. And because EAX contain the return value,
16053 * we'd better not clobber it.
16054 */
16055- leal efi_rt_function_ptr, %edx
16056- movl (%edx), %ecx
16057- pushl %ecx
16058+ pushl (efi_rt_function_ptr)
16059
16060 /*
16061- * 10. Push the saved return address onto the stack and return.
16062+ * 10. Return to the saved return address.
16063 */
16064- leal saved_return_addr, %edx
16065- movl (%edx), %ecx
16066- pushl %ecx
16067- ret
16068+ jmpl *(saved_return_addr)
16069 ENDPROC(efi_call_phys)
16070 .previous
16071
16072-.data
16073+__INITDATA
16074 saved_return_addr:
16075 .long 0
16076 efi_rt_function_ptr:
16077diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
16078index 4c07cca..2c8427d 100644
16079--- a/arch/x86/kernel/efi_stub_64.S
16080+++ b/arch/x86/kernel/efi_stub_64.S
16081@@ -7,6 +7,7 @@
16082 */
16083
16084 #include <linux/linkage.h>
16085+#include <asm/alternative-asm.h>
16086
16087 #define SAVE_XMM \
16088 mov %rsp, %rax; \
16089@@ -40,6 +41,7 @@ ENTRY(efi_call0)
16090 call *%rdi
16091 addq $32, %rsp
16092 RESTORE_XMM
16093+ pax_force_retaddr 0, 1
16094 ret
16095 ENDPROC(efi_call0)
16096
16097@@ -50,6 +52,7 @@ ENTRY(efi_call1)
16098 call *%rdi
16099 addq $32, %rsp
16100 RESTORE_XMM
16101+ pax_force_retaddr 0, 1
16102 ret
16103 ENDPROC(efi_call1)
16104
16105@@ -60,6 +63,7 @@ ENTRY(efi_call2)
16106 call *%rdi
16107 addq $32, %rsp
16108 RESTORE_XMM
16109+ pax_force_retaddr 0, 1
16110 ret
16111 ENDPROC(efi_call2)
16112
16113@@ -71,6 +75,7 @@ ENTRY(efi_call3)
16114 call *%rdi
16115 addq $32, %rsp
16116 RESTORE_XMM
16117+ pax_force_retaddr 0, 1
16118 ret
16119 ENDPROC(efi_call3)
16120
16121@@ -83,6 +88,7 @@ ENTRY(efi_call4)
16122 call *%rdi
16123 addq $32, %rsp
16124 RESTORE_XMM
16125+ pax_force_retaddr 0, 1
16126 ret
16127 ENDPROC(efi_call4)
16128
16129@@ -96,6 +102,7 @@ ENTRY(efi_call5)
16130 call *%rdi
16131 addq $48, %rsp
16132 RESTORE_XMM
16133+ pax_force_retaddr 0, 1
16134 ret
16135 ENDPROC(efi_call5)
16136
16137@@ -112,5 +119,6 @@ ENTRY(efi_call6)
16138 call *%rdi
16139 addq $48, %rsp
16140 RESTORE_XMM
16141+ pax_force_retaddr 0, 1
16142 ret
16143 ENDPROC(efi_call6)
16144diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16145index c097e7d..c689cf4 100644
16146--- a/arch/x86/kernel/entry_32.S
16147+++ b/arch/x86/kernel/entry_32.S
16148@@ -185,13 +185,146 @@
16149 /*CFI_REL_OFFSET gs, PT_GS*/
16150 .endm
16151 .macro SET_KERNEL_GS reg
16152+
16153+#ifdef CONFIG_CC_STACKPROTECTOR
16154 movl $(__KERNEL_STACK_CANARY), \reg
16155+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16156+ movl $(__USER_DS), \reg
16157+#else
16158+ xorl \reg, \reg
16159+#endif
16160+
16161 movl \reg, %gs
16162 .endm
16163
16164 #endif /* CONFIG_X86_32_LAZY_GS */
16165
16166-.macro SAVE_ALL
16167+.macro pax_enter_kernel
16168+#ifdef CONFIG_PAX_KERNEXEC
16169+ call pax_enter_kernel
16170+#endif
16171+.endm
16172+
16173+.macro pax_exit_kernel
16174+#ifdef CONFIG_PAX_KERNEXEC
16175+ call pax_exit_kernel
16176+#endif
16177+.endm
16178+
16179+#ifdef CONFIG_PAX_KERNEXEC
16180+ENTRY(pax_enter_kernel)
16181+#ifdef CONFIG_PARAVIRT
16182+ pushl %eax
16183+ pushl %ecx
16184+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16185+ mov %eax, %esi
16186+#else
16187+ mov %cr0, %esi
16188+#endif
16189+ bts $16, %esi
16190+ jnc 1f
16191+ mov %cs, %esi
16192+ cmp $__KERNEL_CS, %esi
16193+ jz 3f
16194+ ljmp $__KERNEL_CS, $3f
16195+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16196+2:
16197+#ifdef CONFIG_PARAVIRT
16198+ mov %esi, %eax
16199+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16200+#else
16201+ mov %esi, %cr0
16202+#endif
16203+3:
16204+#ifdef CONFIG_PARAVIRT
16205+ popl %ecx
16206+ popl %eax
16207+#endif
16208+ ret
16209+ENDPROC(pax_enter_kernel)
16210+
16211+ENTRY(pax_exit_kernel)
16212+#ifdef CONFIG_PARAVIRT
16213+ pushl %eax
16214+ pushl %ecx
16215+#endif
16216+ mov %cs, %esi
16217+ cmp $__KERNEXEC_KERNEL_CS, %esi
16218+ jnz 2f
16219+#ifdef CONFIG_PARAVIRT
16220+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16221+ mov %eax, %esi
16222+#else
16223+ mov %cr0, %esi
16224+#endif
16225+ btr $16, %esi
16226+ ljmp $__KERNEL_CS, $1f
16227+1:
16228+#ifdef CONFIG_PARAVIRT
16229+ mov %esi, %eax
16230+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16231+#else
16232+ mov %esi, %cr0
16233+#endif
16234+2:
16235+#ifdef CONFIG_PARAVIRT
16236+ popl %ecx
16237+ popl %eax
16238+#endif
16239+ ret
16240+ENDPROC(pax_exit_kernel)
16241+#endif
16242+
16243+.macro pax_erase_kstack
16244+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16245+ call pax_erase_kstack
16246+#endif
16247+.endm
16248+
16249+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16250+/*
16251+ * ebp: thread_info
16252+ * ecx, edx: can be clobbered
16253+ */
16254+ENTRY(pax_erase_kstack)
16255+ pushl %edi
16256+ pushl %eax
16257+
16258+ mov TI_lowest_stack(%ebp), %edi
16259+ mov $-0xBEEF, %eax
16260+ std
16261+
16262+1: mov %edi, %ecx
16263+ and $THREAD_SIZE_asm - 1, %ecx
16264+ shr $2, %ecx
16265+ repne scasl
16266+ jecxz 2f
16267+
16268+ cmp $2*16, %ecx
16269+ jc 2f
16270+
16271+ mov $2*16, %ecx
16272+ repe scasl
16273+ jecxz 2f
16274+ jne 1b
16275+
16276+2: cld
16277+ mov %esp, %ecx
16278+ sub %edi, %ecx
16279+ shr $2, %ecx
16280+ rep stosl
16281+
16282+ mov TI_task_thread_sp0(%ebp), %edi
16283+ sub $128, %edi
16284+ mov %edi, TI_lowest_stack(%ebp)
16285+
16286+ popl %eax
16287+ popl %edi
16288+ ret
16289+ENDPROC(pax_erase_kstack)
16290+#endif
16291+
16292+.macro __SAVE_ALL _DS
16293 cld
16294 PUSH_GS
16295 pushl %fs
16296@@ -224,7 +357,7 @@
16297 pushl %ebx
16298 CFI_ADJUST_CFA_OFFSET 4
16299 CFI_REL_OFFSET ebx, 0
16300- movl $(__USER_DS), %edx
16301+ movl $\_DS, %edx
16302 movl %edx, %ds
16303 movl %edx, %es
16304 movl $(__KERNEL_PERCPU), %edx
16305@@ -232,6 +365,15 @@
16306 SET_KERNEL_GS %edx
16307 .endm
16308
16309+.macro SAVE_ALL
16310+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16311+ __SAVE_ALL __KERNEL_DS
16312+ pax_enter_kernel
16313+#else
16314+ __SAVE_ALL __USER_DS
16315+#endif
16316+.endm
16317+
16318 .macro RESTORE_INT_REGS
16319 popl %ebx
16320 CFI_ADJUST_CFA_OFFSET -4
16321@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
16322 CFI_ADJUST_CFA_OFFSET -4
16323 jmp syscall_exit
16324 CFI_ENDPROC
16325-END(ret_from_fork)
16326+ENDPROC(ret_from_fork)
16327
16328 /*
16329 * Return to user mode is not as complex as all this looks,
16330@@ -352,7 +494,15 @@ check_userspace:
16331 movb PT_CS(%esp), %al
16332 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
16333 cmpl $USER_RPL, %eax
16334+
16335+#ifdef CONFIG_PAX_KERNEXEC
16336+ jae resume_userspace
16337+
16338+ PAX_EXIT_KERNEL
16339+ jmp resume_kernel
16340+#else
16341 jb resume_kernel # not returning to v8086 or userspace
16342+#endif
16343
16344 ENTRY(resume_userspace)
16345 LOCKDEP_SYS_EXIT
16346@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
16347 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16348 # int/exception return?
16349 jne work_pending
16350- jmp restore_all
16351-END(ret_from_exception)
16352+ jmp restore_all_pax
16353+ENDPROC(ret_from_exception)
16354
16355 #ifdef CONFIG_PREEMPT
16356 ENTRY(resume_kernel)
16357@@ -380,7 +530,7 @@ need_resched:
16358 jz restore_all
16359 call preempt_schedule_irq
16360 jmp need_resched
16361-END(resume_kernel)
16362+ENDPROC(resume_kernel)
16363 #endif
16364 CFI_ENDPROC
16365
16366@@ -414,25 +564,36 @@ sysenter_past_esp:
16367 /*CFI_REL_OFFSET cs, 0*/
16368 /*
16369 * Push current_thread_info()->sysenter_return to the stack.
16370- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16371- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16372 */
16373- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
16374+ pushl $0
16375 CFI_ADJUST_CFA_OFFSET 4
16376 CFI_REL_OFFSET eip, 0
16377
16378 pushl %eax
16379 CFI_ADJUST_CFA_OFFSET 4
16380 SAVE_ALL
16381+ GET_THREAD_INFO(%ebp)
16382+ movl TI_sysenter_return(%ebp),%ebp
16383+ movl %ebp,PT_EIP(%esp)
16384 ENABLE_INTERRUPTS(CLBR_NONE)
16385
16386 /*
16387 * Load the potential sixth argument from user stack.
16388 * Careful about security.
16389 */
16390+ movl PT_OLDESP(%esp),%ebp
16391+
16392+#ifdef CONFIG_PAX_MEMORY_UDEREF
16393+ mov PT_OLDSS(%esp),%ds
16394+1: movl %ds:(%ebp),%ebp
16395+ push %ss
16396+ pop %ds
16397+#else
16398 cmpl $__PAGE_OFFSET-3,%ebp
16399 jae syscall_fault
16400 1: movl (%ebp),%ebp
16401+#endif
16402+
16403 movl %ebp,PT_EBP(%esp)
16404 .section __ex_table,"a"
16405 .align 4
16406@@ -455,12 +616,24 @@ sysenter_do_call:
16407 testl $_TIF_ALLWORK_MASK, %ecx
16408 jne sysexit_audit
16409 sysenter_exit:
16410+
16411+#ifdef CONFIG_PAX_RANDKSTACK
16412+ pushl_cfi %eax
16413+ movl %esp, %eax
16414+ call pax_randomize_kstack
16415+ popl_cfi %eax
16416+#endif
16417+
16418+ pax_erase_kstack
16419+
16420 /* if something modifies registers it must also disable sysexit */
16421 movl PT_EIP(%esp), %edx
16422 movl PT_OLDESP(%esp), %ecx
16423 xorl %ebp,%ebp
16424 TRACE_IRQS_ON
16425 1: mov PT_FS(%esp), %fs
16426+2: mov PT_DS(%esp), %ds
16427+3: mov PT_ES(%esp), %es
16428 PTGS_TO_GS
16429 ENABLE_INTERRUPTS_SYSEXIT
16430
16431@@ -477,6 +650,9 @@ sysenter_audit:
16432 movl %eax,%edx /* 2nd arg: syscall number */
16433 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16434 call audit_syscall_entry
16435+
16436+ pax_erase_kstack
16437+
16438 pushl %ebx
16439 CFI_ADJUST_CFA_OFFSET 4
16440 movl PT_EAX(%esp),%eax /* reload syscall number */
16441@@ -504,11 +680,17 @@ sysexit_audit:
16442
16443 CFI_ENDPROC
16444 .pushsection .fixup,"ax"
16445-2: movl $0,PT_FS(%esp)
16446+4: movl $0,PT_FS(%esp)
16447+ jmp 1b
16448+5: movl $0,PT_DS(%esp)
16449+ jmp 1b
16450+6: movl $0,PT_ES(%esp)
16451 jmp 1b
16452 .section __ex_table,"a"
16453 .align 4
16454- .long 1b,2b
16455+ .long 1b,4b
16456+ .long 2b,5b
16457+ .long 3b,6b
16458 .popsection
16459 PTGS_TO_GS_EX
16460 ENDPROC(ia32_sysenter_target)
16461@@ -538,6 +720,15 @@ syscall_exit:
16462 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16463 jne syscall_exit_work
16464
16465+restore_all_pax:
16466+
16467+#ifdef CONFIG_PAX_RANDKSTACK
16468+ movl %esp, %eax
16469+ call pax_randomize_kstack
16470+#endif
16471+
16472+ pax_erase_kstack
16473+
16474 restore_all:
16475 TRACE_IRQS_IRET
16476 restore_all_notrace:
16477@@ -602,10 +793,29 @@ ldt_ss:
16478 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16479 mov %dx, %ax /* eax: new kernel esp */
16480 sub %eax, %edx /* offset (low word is 0) */
16481- PER_CPU(gdt_page, %ebx)
16482+#ifdef CONFIG_SMP
16483+ movl PER_CPU_VAR(cpu_number), %ebx
16484+ shll $PAGE_SHIFT_asm, %ebx
16485+ addl $cpu_gdt_table, %ebx
16486+#else
16487+ movl $cpu_gdt_table, %ebx
16488+#endif
16489 shr $16, %edx
16490+
16491+#ifdef CONFIG_PAX_KERNEXEC
16492+ mov %cr0, %esi
16493+ btr $16, %esi
16494+ mov %esi, %cr0
16495+#endif
16496+
16497 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
16498 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
16499+
16500+#ifdef CONFIG_PAX_KERNEXEC
16501+ bts $16, %esi
16502+ mov %esi, %cr0
16503+#endif
16504+
16505 pushl $__ESPFIX_SS
16506 CFI_ADJUST_CFA_OFFSET 4
16507 push %eax /* new kernel esp */
16508@@ -636,36 +846,30 @@ work_resched:
16509 movl TI_flags(%ebp), %ecx
16510 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16511 # than syscall tracing?
16512- jz restore_all
16513+ jz restore_all_pax
16514 testb $_TIF_NEED_RESCHED, %cl
16515 jnz work_resched
16516
16517 work_notifysig: # deal with pending signals and
16518 # notify-resume requests
16519+ movl %esp, %eax
16520 #ifdef CONFIG_VM86
16521 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16522- movl %esp, %eax
16523- jne work_notifysig_v86 # returning to kernel-space or
16524+ jz 1f # returning to kernel-space or
16525 # vm86-space
16526- xorl %edx, %edx
16527- call do_notify_resume
16528- jmp resume_userspace_sig
16529
16530- ALIGN
16531-work_notifysig_v86:
16532 pushl %ecx # save ti_flags for do_notify_resume
16533 CFI_ADJUST_CFA_OFFSET 4
16534 call save_v86_state # %eax contains pt_regs pointer
16535 popl %ecx
16536 CFI_ADJUST_CFA_OFFSET -4
16537 movl %eax, %esp
16538-#else
16539- movl %esp, %eax
16540+1:
16541 #endif
16542 xorl %edx, %edx
16543 call do_notify_resume
16544 jmp resume_userspace_sig
16545-END(work_pending)
16546+ENDPROC(work_pending)
16547
16548 # perform syscall exit tracing
16549 ALIGN
16550@@ -673,11 +877,14 @@ syscall_trace_entry:
16551 movl $-ENOSYS,PT_EAX(%esp)
16552 movl %esp, %eax
16553 call syscall_trace_enter
16554+
16555+ pax_erase_kstack
16556+
16557 /* What it returned is what we'll actually use. */
16558 cmpl $(nr_syscalls), %eax
16559 jnae syscall_call
16560 jmp syscall_exit
16561-END(syscall_trace_entry)
16562+ENDPROC(syscall_trace_entry)
16563
16564 # perform syscall exit tracing
16565 ALIGN
16566@@ -690,20 +897,24 @@ syscall_exit_work:
16567 movl %esp, %eax
16568 call syscall_trace_leave
16569 jmp resume_userspace
16570-END(syscall_exit_work)
16571+ENDPROC(syscall_exit_work)
16572 CFI_ENDPROC
16573
16574 RING0_INT_FRAME # can't unwind into user space anyway
16575 syscall_fault:
16576+#ifdef CONFIG_PAX_MEMORY_UDEREF
16577+ push %ss
16578+ pop %ds
16579+#endif
16580 GET_THREAD_INFO(%ebp)
16581 movl $-EFAULT,PT_EAX(%esp)
16582 jmp resume_userspace
16583-END(syscall_fault)
16584+ENDPROC(syscall_fault)
16585
16586 syscall_badsys:
16587 movl $-ENOSYS,PT_EAX(%esp)
16588 jmp resume_userspace
16589-END(syscall_badsys)
16590+ENDPROC(syscall_badsys)
16591 CFI_ENDPROC
16592
16593 /*
16594@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
16595 PTREGSCALL(vm86)
16596 PTREGSCALL(vm86old)
16597
16598+ ALIGN;
16599+ENTRY(kernel_execve)
16600+ push %ebp
16601+ sub $PT_OLDSS+4,%esp
16602+ push %edi
16603+ push %ecx
16604+ push %eax
16605+ lea 3*4(%esp),%edi
16606+ mov $PT_OLDSS/4+1,%ecx
16607+ xorl %eax,%eax
16608+ rep stosl
16609+ pop %eax
16610+ pop %ecx
16611+ pop %edi
16612+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16613+ mov %eax,PT_EBX(%esp)
16614+ mov %edx,PT_ECX(%esp)
16615+ mov %ecx,PT_EDX(%esp)
16616+ mov %esp,%eax
16617+ call sys_execve
16618+ GET_THREAD_INFO(%ebp)
16619+ test %eax,%eax
16620+ jz syscall_exit
16621+ add $PT_OLDSS+4,%esp
16622+ pop %ebp
16623+ ret
16624+
16625 .macro FIXUP_ESPFIX_STACK
16626 /*
16627 * Switch back for ESPFIX stack to the normal zerobased stack
16628@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
16629 * normal stack and adjusts ESP with the matching offset.
16630 */
16631 /* fixup the stack */
16632- PER_CPU(gdt_page, %ebx)
16633+#ifdef CONFIG_SMP
16634+ movl PER_CPU_VAR(cpu_number), %ebx
16635+ shll $PAGE_SHIFT_asm, %ebx
16636+ addl $cpu_gdt_table, %ebx
16637+#else
16638+ movl $cpu_gdt_table, %ebx
16639+#endif
16640 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
16641 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
16642 shl $16, %eax
16643@@ -793,7 +1037,7 @@ vector=vector+1
16644 .endr
16645 2: jmp common_interrupt
16646 .endr
16647-END(irq_entries_start)
16648+ENDPROC(irq_entries_start)
16649
16650 .previous
16651 END(interrupt)
16652@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
16653 CFI_ADJUST_CFA_OFFSET 4
16654 jmp error_code
16655 CFI_ENDPROC
16656-END(coprocessor_error)
16657+ENDPROC(coprocessor_error)
16658
16659 ENTRY(simd_coprocessor_error)
16660 RING0_INT_FRAME
16661@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
16662 CFI_ADJUST_CFA_OFFSET 4
16663 jmp error_code
16664 CFI_ENDPROC
16665-END(simd_coprocessor_error)
16666+ENDPROC(simd_coprocessor_error)
16667
16668 ENTRY(device_not_available)
16669 RING0_INT_FRAME
16670@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
16671 CFI_ADJUST_CFA_OFFSET 4
16672 jmp error_code
16673 CFI_ENDPROC
16674-END(device_not_available)
16675+ENDPROC(device_not_available)
16676
16677 #ifdef CONFIG_PARAVIRT
16678 ENTRY(native_iret)
16679@@ -869,12 +1113,12 @@ ENTRY(native_iret)
16680 .align 4
16681 .long native_iret, iret_exc
16682 .previous
16683-END(native_iret)
16684+ENDPROC(native_iret)
16685
16686 ENTRY(native_irq_enable_sysexit)
16687 sti
16688 sysexit
16689-END(native_irq_enable_sysexit)
16690+ENDPROC(native_irq_enable_sysexit)
16691 #endif
16692
16693 ENTRY(overflow)
16694@@ -885,7 +1129,7 @@ ENTRY(overflow)
16695 CFI_ADJUST_CFA_OFFSET 4
16696 jmp error_code
16697 CFI_ENDPROC
16698-END(overflow)
16699+ENDPROC(overflow)
16700
16701 ENTRY(bounds)
16702 RING0_INT_FRAME
16703@@ -895,7 +1139,7 @@ ENTRY(bounds)
16704 CFI_ADJUST_CFA_OFFSET 4
16705 jmp error_code
16706 CFI_ENDPROC
16707-END(bounds)
16708+ENDPROC(bounds)
16709
16710 ENTRY(invalid_op)
16711 RING0_INT_FRAME
16712@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
16713 CFI_ADJUST_CFA_OFFSET 4
16714 jmp error_code
16715 CFI_ENDPROC
16716-END(invalid_op)
16717+ENDPROC(invalid_op)
16718
16719 ENTRY(coprocessor_segment_overrun)
16720 RING0_INT_FRAME
16721@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
16722 CFI_ADJUST_CFA_OFFSET 4
16723 jmp error_code
16724 CFI_ENDPROC
16725-END(coprocessor_segment_overrun)
16726+ENDPROC(coprocessor_segment_overrun)
16727
16728 ENTRY(invalid_TSS)
16729 RING0_EC_FRAME
16730@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
16731 CFI_ADJUST_CFA_OFFSET 4
16732 jmp error_code
16733 CFI_ENDPROC
16734-END(invalid_TSS)
16735+ENDPROC(invalid_TSS)
16736
16737 ENTRY(segment_not_present)
16738 RING0_EC_FRAME
16739@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
16740 CFI_ADJUST_CFA_OFFSET 4
16741 jmp error_code
16742 CFI_ENDPROC
16743-END(segment_not_present)
16744+ENDPROC(segment_not_present)
16745
16746 ENTRY(stack_segment)
16747 RING0_EC_FRAME
16748@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
16749 CFI_ADJUST_CFA_OFFSET 4
16750 jmp error_code
16751 CFI_ENDPROC
16752-END(stack_segment)
16753+ENDPROC(stack_segment)
16754
16755 ENTRY(alignment_check)
16756 RING0_EC_FRAME
16757@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
16758 CFI_ADJUST_CFA_OFFSET 4
16759 jmp error_code
16760 CFI_ENDPROC
16761-END(alignment_check)
16762+ENDPROC(alignment_check)
16763
16764 ENTRY(divide_error)
16765 RING0_INT_FRAME
16766@@ -957,7 +1201,7 @@ ENTRY(divide_error)
16767 CFI_ADJUST_CFA_OFFSET 4
16768 jmp error_code
16769 CFI_ENDPROC
16770-END(divide_error)
16771+ENDPROC(divide_error)
16772
16773 #ifdef CONFIG_X86_MCE
16774 ENTRY(machine_check)
16775@@ -968,7 +1212,7 @@ ENTRY(machine_check)
16776 CFI_ADJUST_CFA_OFFSET 4
16777 jmp error_code
16778 CFI_ENDPROC
16779-END(machine_check)
16780+ENDPROC(machine_check)
16781 #endif
16782
16783 ENTRY(spurious_interrupt_bug)
16784@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
16785 CFI_ADJUST_CFA_OFFSET 4
16786 jmp error_code
16787 CFI_ENDPROC
16788-END(spurious_interrupt_bug)
16789+ENDPROC(spurious_interrupt_bug)
16790
16791 ENTRY(kernel_thread_helper)
16792 pushl $0 # fake return address for unwinder
16793@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
16794
16795 ENTRY(mcount)
16796 ret
16797-END(mcount)
16798+ENDPROC(mcount)
16799
16800 ENTRY(ftrace_caller)
16801 cmpl $0, function_trace_stop
16802@@ -1124,7 +1368,7 @@ ftrace_graph_call:
16803 .globl ftrace_stub
16804 ftrace_stub:
16805 ret
16806-END(ftrace_caller)
16807+ENDPROC(ftrace_caller)
16808
16809 #else /* ! CONFIG_DYNAMIC_FTRACE */
16810
16811@@ -1160,7 +1404,7 @@ trace:
16812 popl %ecx
16813 popl %eax
16814 jmp ftrace_stub
16815-END(mcount)
16816+ENDPROC(mcount)
16817 #endif /* CONFIG_DYNAMIC_FTRACE */
16818 #endif /* CONFIG_FUNCTION_TRACER */
16819
16820@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
16821 popl %ecx
16822 popl %eax
16823 ret
16824-END(ftrace_graph_caller)
16825+ENDPROC(ftrace_graph_caller)
16826
16827 .globl return_to_handler
16828 return_to_handler:
16829@@ -1198,7 +1442,6 @@ return_to_handler:
16830 ret
16831 #endif
16832
16833-.section .rodata,"a"
16834 #include "syscall_table_32.S"
16835
16836 syscall_table_size=(.-sys_call_table)
16837@@ -1255,15 +1498,18 @@ error_code:
16838 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16839 REG_TO_PTGS %ecx
16840 SET_KERNEL_GS %ecx
16841- movl $(__USER_DS), %ecx
16842+ movl $(__KERNEL_DS), %ecx
16843 movl %ecx, %ds
16844 movl %ecx, %es
16845+
16846+ pax_enter_kernel
16847+
16848 TRACE_IRQS_OFF
16849 movl %esp,%eax # pt_regs pointer
16850 call *%edi
16851 jmp ret_from_exception
16852 CFI_ENDPROC
16853-END(page_fault)
16854+ENDPROC(page_fault)
16855
16856 /*
16857 * Debug traps and NMI can happen at the one SYSENTER instruction
16858@@ -1309,7 +1555,7 @@ debug_stack_correct:
16859 call do_debug
16860 jmp ret_from_exception
16861 CFI_ENDPROC
16862-END(debug)
16863+ENDPROC(debug)
16864
16865 /*
16866 * NMI is doubly nasty. It can happen _while_ we're handling
16867@@ -1351,6 +1597,9 @@ nmi_stack_correct:
16868 xorl %edx,%edx # zero error code
16869 movl %esp,%eax # pt_regs pointer
16870 call do_nmi
16871+
16872+ pax_exit_kernel
16873+
16874 jmp restore_all_notrace
16875 CFI_ENDPROC
16876
16877@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
16878 FIXUP_ESPFIX_STACK # %eax == %esp
16879 xorl %edx,%edx # zero error code
16880 call do_nmi
16881+
16882+ pax_exit_kernel
16883+
16884 RESTORE_REGS
16885 lss 12+4(%esp), %esp # back to espfix stack
16886 CFI_ADJUST_CFA_OFFSET -24
16887 jmp irq_return
16888 CFI_ENDPROC
16889-END(nmi)
16890+ENDPROC(nmi)
16891
16892 ENTRY(int3)
16893 RING0_INT_FRAME
16894@@ -1409,7 +1661,7 @@ ENTRY(int3)
16895 call do_int3
16896 jmp ret_from_exception
16897 CFI_ENDPROC
16898-END(int3)
16899+ENDPROC(int3)
16900
16901 ENTRY(general_protection)
16902 RING0_EC_FRAME
16903@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
16904 CFI_ADJUST_CFA_OFFSET 4
16905 jmp error_code
16906 CFI_ENDPROC
16907-END(general_protection)
16908+ENDPROC(general_protection)
16909
16910 /*
16911 * End of kprobes section
16912diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16913index 34a56a9..74613c5 100644
16914--- a/arch/x86/kernel/entry_64.S
16915+++ b/arch/x86/kernel/entry_64.S
16916@@ -53,6 +53,8 @@
16917 #include <asm/paravirt.h>
16918 #include <asm/ftrace.h>
16919 #include <asm/percpu.h>
16920+#include <asm/pgtable.h>
16921+#include <asm/alternative-asm.h>
16922
16923 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16924 #include <linux/elf-em.h>
16925@@ -64,8 +66,9 @@
16926 #ifdef CONFIG_FUNCTION_TRACER
16927 #ifdef CONFIG_DYNAMIC_FTRACE
16928 ENTRY(mcount)
16929+ pax_force_retaddr
16930 retq
16931-END(mcount)
16932+ENDPROC(mcount)
16933
16934 ENTRY(ftrace_caller)
16935 cmpl $0, function_trace_stop
16936@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
16937 #endif
16938
16939 GLOBAL(ftrace_stub)
16940+ pax_force_retaddr
16941 retq
16942-END(ftrace_caller)
16943+ENDPROC(ftrace_caller)
16944
16945 #else /* ! CONFIG_DYNAMIC_FTRACE */
16946 ENTRY(mcount)
16947@@ -108,6 +112,7 @@ ENTRY(mcount)
16948 #endif
16949
16950 GLOBAL(ftrace_stub)
16951+ pax_force_retaddr
16952 retq
16953
16954 trace:
16955@@ -117,12 +122,13 @@ trace:
16956 movq 8(%rbp), %rsi
16957 subq $MCOUNT_INSN_SIZE, %rdi
16958
16959+ pax_force_fptr ftrace_trace_function
16960 call *ftrace_trace_function
16961
16962 MCOUNT_RESTORE_FRAME
16963
16964 jmp ftrace_stub
16965-END(mcount)
16966+ENDPROC(mcount)
16967 #endif /* CONFIG_DYNAMIC_FTRACE */
16968 #endif /* CONFIG_FUNCTION_TRACER */
16969
16970@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
16971
16972 MCOUNT_RESTORE_FRAME
16973
16974+ pax_force_retaddr
16975 retq
16976-END(ftrace_graph_caller)
16977+ENDPROC(ftrace_graph_caller)
16978
16979 GLOBAL(return_to_handler)
16980 subq $24, %rsp
16981@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
16982 movq 8(%rsp), %rdx
16983 movq (%rsp), %rax
16984 addq $16, %rsp
16985+ pax_force_retaddr
16986 retq
16987 #endif
16988
16989@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
16990 ENDPROC(native_usergs_sysret64)
16991 #endif /* CONFIG_PARAVIRT */
16992
16993+ .macro ljmpq sel, off
16994+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16995+ .byte 0x48; ljmp *1234f(%rip)
16996+ .pushsection .rodata
16997+ .align 16
16998+ 1234: .quad \off; .word \sel
16999+ .popsection
17000+#else
17001+ pushq $\sel
17002+ pushq $\off
17003+ lretq
17004+#endif
17005+ .endm
17006+
17007+ .macro pax_enter_kernel
17008+ pax_set_fptr_mask
17009+#ifdef CONFIG_PAX_KERNEXEC
17010+ call pax_enter_kernel
17011+#endif
17012+ .endm
17013+
17014+ .macro pax_exit_kernel
17015+#ifdef CONFIG_PAX_KERNEXEC
17016+ call pax_exit_kernel
17017+#endif
17018+ .endm
17019+
17020+#ifdef CONFIG_PAX_KERNEXEC
17021+ENTRY(pax_enter_kernel)
17022+ pushq %rdi
17023+
17024+#ifdef CONFIG_PARAVIRT
17025+ PV_SAVE_REGS(CLBR_RDI)
17026+#endif
17027+
17028+ GET_CR0_INTO_RDI
17029+ bts $16,%rdi
17030+ jnc 3f
17031+ mov %cs,%edi
17032+ cmp $__KERNEL_CS,%edi
17033+ jnz 2f
17034+1:
17035+
17036+#ifdef CONFIG_PARAVIRT
17037+ PV_RESTORE_REGS(CLBR_RDI)
17038+#endif
17039+
17040+ popq %rdi
17041+ pax_force_retaddr
17042+ retq
17043+
17044+2: ljmpq __KERNEL_CS,1f
17045+3: ljmpq __KERNEXEC_KERNEL_CS,4f
17046+4: SET_RDI_INTO_CR0
17047+ jmp 1b
17048+ENDPROC(pax_enter_kernel)
17049+
17050+ENTRY(pax_exit_kernel)
17051+ pushq %rdi
17052+
17053+#ifdef CONFIG_PARAVIRT
17054+ PV_SAVE_REGS(CLBR_RDI)
17055+#endif
17056+
17057+ mov %cs,%rdi
17058+ cmp $__KERNEXEC_KERNEL_CS,%edi
17059+ jz 2f
17060+1:
17061+
17062+#ifdef CONFIG_PARAVIRT
17063+ PV_RESTORE_REGS(CLBR_RDI);
17064+#endif
17065+
17066+ popq %rdi
17067+ pax_force_retaddr
17068+ retq
17069+
17070+2: GET_CR0_INTO_RDI
17071+ btr $16,%rdi
17072+ ljmpq __KERNEL_CS,3f
17073+3: SET_RDI_INTO_CR0
17074+ jmp 1b
17075+#ifdef CONFIG_PARAVIRT
17076+ PV_RESTORE_REGS(CLBR_RDI);
17077+#endif
17078+
17079+ popq %rdi
17080+ pax_force_retaddr
17081+ retq
17082+ENDPROC(pax_exit_kernel)
17083+#endif
17084+
17085+ .macro pax_enter_kernel_user
17086+ pax_set_fptr_mask
17087+#ifdef CONFIG_PAX_MEMORY_UDEREF
17088+ call pax_enter_kernel_user
17089+#endif
17090+ .endm
17091+
17092+ .macro pax_exit_kernel_user
17093+#ifdef CONFIG_PAX_MEMORY_UDEREF
17094+ call pax_exit_kernel_user
17095+#endif
17096+#ifdef CONFIG_PAX_RANDKSTACK
17097+ pushq %rax
17098+ call pax_randomize_kstack
17099+ popq %rax
17100+#endif
17101+ .endm
17102+
17103+#ifdef CONFIG_PAX_MEMORY_UDEREF
17104+ENTRY(pax_enter_kernel_user)
17105+ pushq %rdi
17106+ pushq %rbx
17107+
17108+#ifdef CONFIG_PARAVIRT
17109+ PV_SAVE_REGS(CLBR_RDI)
17110+#endif
17111+
17112+ GET_CR3_INTO_RDI
17113+ mov %rdi,%rbx
17114+ add $__START_KERNEL_map,%rbx
17115+ sub phys_base(%rip),%rbx
17116+
17117+#ifdef CONFIG_PARAVIRT
17118+ pushq %rdi
17119+ cmpl $0, pv_info+PARAVIRT_enabled
17120+ jz 1f
17121+ i = 0
17122+ .rept USER_PGD_PTRS
17123+ mov i*8(%rbx),%rsi
17124+ mov $0,%sil
17125+ lea i*8(%rbx),%rdi
17126+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17127+ i = i + 1
17128+ .endr
17129+ jmp 2f
17130+1:
17131+#endif
17132+
17133+ i = 0
17134+ .rept USER_PGD_PTRS
17135+ movb $0,i*8(%rbx)
17136+ i = i + 1
17137+ .endr
17138+
17139+#ifdef CONFIG_PARAVIRT
17140+2: popq %rdi
17141+#endif
17142+ SET_RDI_INTO_CR3
17143+
17144+#ifdef CONFIG_PAX_KERNEXEC
17145+ GET_CR0_INTO_RDI
17146+ bts $16,%rdi
17147+ SET_RDI_INTO_CR0
17148+#endif
17149+
17150+#ifdef CONFIG_PARAVIRT
17151+ PV_RESTORE_REGS(CLBR_RDI)
17152+#endif
17153+
17154+ popq %rbx
17155+ popq %rdi
17156+ pax_force_retaddr
17157+ retq
17158+ENDPROC(pax_enter_kernel_user)
17159+
17160+ENTRY(pax_exit_kernel_user)
17161+ push %rdi
17162+
17163+#ifdef CONFIG_PARAVIRT
17164+ pushq %rbx
17165+ PV_SAVE_REGS(CLBR_RDI)
17166+#endif
17167+
17168+#ifdef CONFIG_PAX_KERNEXEC
17169+ GET_CR0_INTO_RDI
17170+ btr $16,%rdi
17171+ SET_RDI_INTO_CR0
17172+#endif
17173+
17174+ GET_CR3_INTO_RDI
17175+ add $__START_KERNEL_map,%rdi
17176+ sub phys_base(%rip),%rdi
17177+
17178+#ifdef CONFIG_PARAVIRT
17179+ cmpl $0, pv_info+PARAVIRT_enabled
17180+ jz 1f
17181+ mov %rdi,%rbx
17182+ i = 0
17183+ .rept USER_PGD_PTRS
17184+ mov i*8(%rbx),%rsi
17185+ mov $0x67,%sil
17186+ lea i*8(%rbx),%rdi
17187+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17188+ i = i + 1
17189+ .endr
17190+ jmp 2f
17191+1:
17192+#endif
17193+
17194+ i = 0
17195+ .rept USER_PGD_PTRS
17196+ movb $0x67,i*8(%rdi)
17197+ i = i + 1
17198+ .endr
17199+
17200+#ifdef CONFIG_PARAVIRT
17201+2: PV_RESTORE_REGS(CLBR_RDI)
17202+ popq %rbx
17203+#endif
17204+
17205+ popq %rdi
17206+ pax_force_retaddr
17207+ retq
17208+ENDPROC(pax_exit_kernel_user)
17209+#endif
17210+
17211+.macro pax_erase_kstack
17212+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17213+ call pax_erase_kstack
17214+#endif
17215+.endm
17216+
17217+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17218+/*
17219+ * r11: thread_info
17220+ * rcx, rdx: can be clobbered
17221+ */
17222+ENTRY(pax_erase_kstack)
17223+ pushq %rdi
17224+ pushq %rax
17225+ pushq %r11
17226+
17227+ GET_THREAD_INFO(%r11)
17228+ mov TI_lowest_stack(%r11), %rdi
17229+ mov $-0xBEEF, %rax
17230+ std
17231+
17232+1: mov %edi, %ecx
17233+ and $THREAD_SIZE_asm - 1, %ecx
17234+ shr $3, %ecx
17235+ repne scasq
17236+ jecxz 2f
17237+
17238+ cmp $2*8, %ecx
17239+ jc 2f
17240+
17241+ mov $2*8, %ecx
17242+ repe scasq
17243+ jecxz 2f
17244+ jne 1b
17245+
17246+2: cld
17247+ mov %esp, %ecx
17248+ sub %edi, %ecx
17249+
17250+ cmp $THREAD_SIZE_asm, %rcx
17251+ jb 3f
17252+ ud2
17253+3:
17254+
17255+ shr $3, %ecx
17256+ rep stosq
17257+
17258+ mov TI_task_thread_sp0(%r11), %rdi
17259+ sub $256, %rdi
17260+ mov %rdi, TI_lowest_stack(%r11)
17261+
17262+ popq %r11
17263+ popq %rax
17264+ popq %rdi
17265+ pax_force_retaddr
17266+ ret
17267+ENDPROC(pax_erase_kstack)
17268+#endif
17269
17270 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17271 #ifdef CONFIG_TRACE_IRQFLAGS
17272@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
17273 .endm
17274
17275 .macro UNFAKE_STACK_FRAME
17276- addq $8*6, %rsp
17277- CFI_ADJUST_CFA_OFFSET -(6*8)
17278+ addq $8*6 + ARG_SKIP, %rsp
17279+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17280 .endm
17281
17282 /*
17283@@ -317,7 +601,7 @@ ENTRY(save_args)
17284 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
17285 movq_cfi rbp, 8 /* push %rbp */
17286 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
17287- testl $3, CS(%rdi)
17288+ testb $3, CS(%rdi)
17289 je 1f
17290 SWAPGS
17291 /*
17292@@ -337,9 +621,10 @@ ENTRY(save_args)
17293 * We entered an interrupt context - irqs are off:
17294 */
17295 2: TRACE_IRQS_OFF
17296+ pax_force_retaddr_bts
17297 ret
17298 CFI_ENDPROC
17299-END(save_args)
17300+ENDPROC(save_args)
17301
17302 ENTRY(save_rest)
17303 PARTIAL_FRAME 1 REST_SKIP+8
17304@@ -352,9 +637,10 @@ ENTRY(save_rest)
17305 movq_cfi r15, R15+16
17306 movq %r11, 8(%rsp) /* return address */
17307 FIXUP_TOP_OF_STACK %r11, 16
17308+ pax_force_retaddr
17309 ret
17310 CFI_ENDPROC
17311-END(save_rest)
17312+ENDPROC(save_rest)
17313
17314 /* save complete stack frame */
17315 .pushsection .kprobes.text, "ax"
17316@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
17317 js 1f /* negative -> in kernel */
17318 SWAPGS
17319 xorl %ebx,%ebx
17320-1: ret
17321+1: pax_force_retaddr_bts
17322+ ret
17323 CFI_ENDPROC
17324-END(save_paranoid)
17325+ENDPROC(save_paranoid)
17326 .popsection
17327
17328 /*
17329@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
17330
17331 RESTORE_REST
17332
17333- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17334+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17335 je int_ret_from_sys_call
17336
17337 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17338@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
17339 jmp ret_from_sys_call # go to the SYSRET fastpath
17340
17341 CFI_ENDPROC
17342-END(ret_from_fork)
17343+ENDPROC(ret_from_fork)
17344
17345 /*
17346 * System call entry. Upto 6 arguments in registers are supported.
17347@@ -455,7 +742,7 @@ END(ret_from_fork)
17348 ENTRY(system_call)
17349 CFI_STARTPROC simple
17350 CFI_SIGNAL_FRAME
17351- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17352+ CFI_DEF_CFA rsp,0
17353 CFI_REGISTER rip,rcx
17354 /*CFI_REGISTER rflags,r11*/
17355 SWAPGS_UNSAFE_STACK
17356@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
17357
17358 movq %rsp,PER_CPU_VAR(old_rsp)
17359 movq PER_CPU_VAR(kernel_stack),%rsp
17360+ SAVE_ARGS 8*6,1
17361+ pax_enter_kernel_user
17362 /*
17363 * No need to follow this irqs off/on section - it's straight
17364 * and short:
17365 */
17366 ENABLE_INTERRUPTS(CLBR_NONE)
17367- SAVE_ARGS 8,1
17368 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17369 movq %rcx,RIP-ARGOFFSET(%rsp)
17370 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17371@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
17372 system_call_fastpath:
17373 cmpq $__NR_syscall_max,%rax
17374 ja badsys
17375- movq %r10,%rcx
17376+ movq R10-ARGOFFSET(%rsp),%rcx
17377 call *sys_call_table(,%rax,8) # XXX: rip relative
17378 movq %rax,RAX-ARGOFFSET(%rsp)
17379 /*
17380@@ -502,6 +790,8 @@ sysret_check:
17381 andl %edi,%edx
17382 jnz sysret_careful
17383 CFI_REMEMBER_STATE
17384+ pax_exit_kernel_user
17385+ pax_erase_kstack
17386 /*
17387 * sysretq will re-enable interrupts:
17388 */
17389@@ -555,14 +845,18 @@ badsys:
17390 * jump back to the normal fast path.
17391 */
17392 auditsys:
17393- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17394+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17395 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17396 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17397 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17398 movq %rax,%rsi /* 2nd arg: syscall number */
17399 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17400 call audit_syscall_entry
17401+
17402+ pax_erase_kstack
17403+
17404 LOAD_ARGS 0 /* reload call-clobbered registers */
17405+ pax_set_fptr_mask
17406 jmp system_call_fastpath
17407
17408 /*
17409@@ -592,16 +886,20 @@ tracesys:
17410 FIXUP_TOP_OF_STACK %rdi
17411 movq %rsp,%rdi
17412 call syscall_trace_enter
17413+
17414+ pax_erase_kstack
17415+
17416 /*
17417 * Reload arg registers from stack in case ptrace changed them.
17418 * We don't reload %rax because syscall_trace_enter() returned
17419 * the value it wants us to use in the table lookup.
17420 */
17421 LOAD_ARGS ARGOFFSET, 1
17422+ pax_set_fptr_mask
17423 RESTORE_REST
17424 cmpq $__NR_syscall_max,%rax
17425 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17426- movq %r10,%rcx /* fixup for C */
17427+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17428 call *sys_call_table(,%rax,8)
17429 movq %rax,RAX-ARGOFFSET(%rsp)
17430 /* Use IRET because user could have changed frame */
17431@@ -613,7 +911,7 @@ tracesys:
17432 GLOBAL(int_ret_from_sys_call)
17433 DISABLE_INTERRUPTS(CLBR_NONE)
17434 TRACE_IRQS_OFF
17435- testl $3,CS-ARGOFFSET(%rsp)
17436+ testb $3,CS-ARGOFFSET(%rsp)
17437 je retint_restore_args
17438 movl $_TIF_ALLWORK_MASK,%edi
17439 /* edi: mask to check */
17440@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
17441 andl %edi,%edx
17442 jnz int_careful
17443 andl $~TS_COMPAT,TI_status(%rcx)
17444+ pax_erase_kstack
17445 jmp retint_swapgs
17446
17447 /* Either reschedule or signal or syscall exit tracking needed. */
17448@@ -674,7 +973,7 @@ int_restore_rest:
17449 TRACE_IRQS_OFF
17450 jmp int_with_check
17451 CFI_ENDPROC
17452-END(system_call)
17453+ENDPROC(system_call)
17454
17455 /*
17456 * Certain special system calls that need to save a complete full stack frame.
17457@@ -690,7 +989,7 @@ ENTRY(\label)
17458 call \func
17459 jmp ptregscall_common
17460 CFI_ENDPROC
17461-END(\label)
17462+ENDPROC(\label)
17463 .endm
17464
17465 PTREGSCALL stub_clone, sys_clone, %r8
17466@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
17467 movq_cfi_restore R12+8, r12
17468 movq_cfi_restore RBP+8, rbp
17469 movq_cfi_restore RBX+8, rbx
17470+ pax_force_retaddr
17471 ret $REST_SKIP /* pop extended registers */
17472 CFI_ENDPROC
17473-END(ptregscall_common)
17474+ENDPROC(ptregscall_common)
17475
17476 ENTRY(stub_execve)
17477 CFI_STARTPROC
17478@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
17479 RESTORE_REST
17480 jmp int_ret_from_sys_call
17481 CFI_ENDPROC
17482-END(stub_execve)
17483+ENDPROC(stub_execve)
17484
17485 /*
17486 * sigreturn is special because it needs to restore all registers on return.
17487@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
17488 RESTORE_REST
17489 jmp int_ret_from_sys_call
17490 CFI_ENDPROC
17491-END(stub_rt_sigreturn)
17492+ENDPROC(stub_rt_sigreturn)
17493
17494 /*
17495 * Build the entry stubs and pointer table with some assembler magic.
17496@@ -780,7 +1080,7 @@ vector=vector+1
17497 2: jmp common_interrupt
17498 .endr
17499 CFI_ENDPROC
17500-END(irq_entries_start)
17501+ENDPROC(irq_entries_start)
17502
17503 .previous
17504 END(interrupt)
17505@@ -800,6 +1100,16 @@ END(interrupt)
17506 CFI_ADJUST_CFA_OFFSET 10*8
17507 call save_args
17508 PARTIAL_FRAME 0
17509+#ifdef CONFIG_PAX_MEMORY_UDEREF
17510+ testb $3, CS(%rdi)
17511+ jnz 1f
17512+ pax_enter_kernel
17513+ jmp 2f
17514+1: pax_enter_kernel_user
17515+2:
17516+#else
17517+ pax_enter_kernel
17518+#endif
17519 call \func
17520 .endm
17521
17522@@ -822,7 +1132,7 @@ ret_from_intr:
17523 CFI_ADJUST_CFA_OFFSET -8
17524 exit_intr:
17525 GET_THREAD_INFO(%rcx)
17526- testl $3,CS-ARGOFFSET(%rsp)
17527+ testb $3,CS-ARGOFFSET(%rsp)
17528 je retint_kernel
17529
17530 /* Interrupt came from user space */
17531@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
17532 * The iretq could re-enable interrupts:
17533 */
17534 DISABLE_INTERRUPTS(CLBR_ANY)
17535+ pax_exit_kernel_user
17536 TRACE_IRQS_IRETQ
17537 SWAPGS
17538 jmp restore_args
17539
17540 retint_restore_args: /* return to kernel space */
17541 DISABLE_INTERRUPTS(CLBR_ANY)
17542+ pax_exit_kernel
17543+ pax_force_retaddr RIP-ARGOFFSET
17544 /*
17545 * The iretq could re-enable interrupts:
17546 */
17547@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
17548 #endif
17549
17550 CFI_ENDPROC
17551-END(common_interrupt)
17552+ENDPROC(common_interrupt)
17553
17554 /*
17555 * APIC interrupts.
17556@@ -953,7 +1266,7 @@ ENTRY(\sym)
17557 interrupt \do_sym
17558 jmp ret_from_intr
17559 CFI_ENDPROC
17560-END(\sym)
17561+ENDPROC(\sym)
17562 .endm
17563
17564 #ifdef CONFIG_SMP
17565@@ -1032,12 +1345,22 @@ ENTRY(\sym)
17566 CFI_ADJUST_CFA_OFFSET 15*8
17567 call error_entry
17568 DEFAULT_FRAME 0
17569+#ifdef CONFIG_PAX_MEMORY_UDEREF
17570+ testb $3, CS(%rsp)
17571+ jnz 1f
17572+ pax_enter_kernel
17573+ jmp 2f
17574+1: pax_enter_kernel_user
17575+2:
17576+#else
17577+ pax_enter_kernel
17578+#endif
17579 movq %rsp,%rdi /* pt_regs pointer */
17580 xorl %esi,%esi /* no error code */
17581 call \do_sym
17582 jmp error_exit /* %ebx: no swapgs flag */
17583 CFI_ENDPROC
17584-END(\sym)
17585+ENDPROC(\sym)
17586 .endm
17587
17588 .macro paranoidzeroentry sym do_sym
17589@@ -1049,12 +1372,22 @@ ENTRY(\sym)
17590 subq $15*8, %rsp
17591 call save_paranoid
17592 TRACE_IRQS_OFF
17593+#ifdef CONFIG_PAX_MEMORY_UDEREF
17594+ testb $3, CS(%rsp)
17595+ jnz 1f
17596+ pax_enter_kernel
17597+ jmp 2f
17598+1: pax_enter_kernel_user
17599+2:
17600+#else
17601+ pax_enter_kernel
17602+#endif
17603 movq %rsp,%rdi /* pt_regs pointer */
17604 xorl %esi,%esi /* no error code */
17605 call \do_sym
17606 jmp paranoid_exit /* %ebx: no swapgs flag */
17607 CFI_ENDPROC
17608-END(\sym)
17609+ENDPROC(\sym)
17610 .endm
17611
17612 .macro paranoidzeroentry_ist sym do_sym ist
17613@@ -1066,15 +1399,30 @@ ENTRY(\sym)
17614 subq $15*8, %rsp
17615 call save_paranoid
17616 TRACE_IRQS_OFF
17617+#ifdef CONFIG_PAX_MEMORY_UDEREF
17618+ testb $3, CS(%rsp)
17619+ jnz 1f
17620+ pax_enter_kernel
17621+ jmp 2f
17622+1: pax_enter_kernel_user
17623+2:
17624+#else
17625+ pax_enter_kernel
17626+#endif
17627 movq %rsp,%rdi /* pt_regs pointer */
17628 xorl %esi,%esi /* no error code */
17629- PER_CPU(init_tss, %rbp)
17630+#ifdef CONFIG_SMP
17631+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
17632+ lea init_tss(%rbp), %rbp
17633+#else
17634+ lea init_tss(%rip), %rbp
17635+#endif
17636 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17637 call \do_sym
17638 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17639 jmp paranoid_exit /* %ebx: no swapgs flag */
17640 CFI_ENDPROC
17641-END(\sym)
17642+ENDPROC(\sym)
17643 .endm
17644
17645 .macro errorentry sym do_sym
17646@@ -1085,13 +1433,23 @@ ENTRY(\sym)
17647 CFI_ADJUST_CFA_OFFSET 15*8
17648 call error_entry
17649 DEFAULT_FRAME 0
17650+#ifdef CONFIG_PAX_MEMORY_UDEREF
17651+ testb $3, CS(%rsp)
17652+ jnz 1f
17653+ pax_enter_kernel
17654+ jmp 2f
17655+1: pax_enter_kernel_user
17656+2:
17657+#else
17658+ pax_enter_kernel
17659+#endif
17660 movq %rsp,%rdi /* pt_regs pointer */
17661 movq ORIG_RAX(%rsp),%rsi /* get error code */
17662 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17663 call \do_sym
17664 jmp error_exit /* %ebx: no swapgs flag */
17665 CFI_ENDPROC
17666-END(\sym)
17667+ENDPROC(\sym)
17668 .endm
17669
17670 /* error code is on the stack already */
17671@@ -1104,13 +1462,23 @@ ENTRY(\sym)
17672 call save_paranoid
17673 DEFAULT_FRAME 0
17674 TRACE_IRQS_OFF
17675+#ifdef CONFIG_PAX_MEMORY_UDEREF
17676+ testb $3, CS(%rsp)
17677+ jnz 1f
17678+ pax_enter_kernel
17679+ jmp 2f
17680+1: pax_enter_kernel_user
17681+2:
17682+#else
17683+ pax_enter_kernel
17684+#endif
17685 movq %rsp,%rdi /* pt_regs pointer */
17686 movq ORIG_RAX(%rsp),%rsi /* get error code */
17687 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17688 call \do_sym
17689 jmp paranoid_exit /* %ebx: no swapgs flag */
17690 CFI_ENDPROC
17691-END(\sym)
17692+ENDPROC(\sym)
17693 .endm
17694
17695 zeroentry divide_error do_divide_error
17696@@ -1141,9 +1509,10 @@ gs_change:
17697 SWAPGS
17698 popf
17699 CFI_ADJUST_CFA_OFFSET -8
17700+ pax_force_retaddr
17701 ret
17702 CFI_ENDPROC
17703-END(native_load_gs_index)
17704+ENDPROC(native_load_gs_index)
17705
17706 .section __ex_table,"a"
17707 .align 8
17708@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
17709 * of hacks for example to fork off the per-CPU idle tasks.
17710 * [Hopefully no generic code relies on the reschedule -AK]
17711 */
17712- RESTORE_ALL
17713+ RESTORE_REST
17714 UNFAKE_STACK_FRAME
17715+ pax_force_retaddr
17716 ret
17717 CFI_ENDPROC
17718-END(kernel_thread)
17719+ENDPROC(kernel_thread)
17720
17721 ENTRY(child_rip)
17722 pushq $0 # fake return address
17723@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
17724 */
17725 movq %rdi, %rax
17726 movq %rsi, %rdi
17727+ pax_force_fptr %rax
17728 call *%rax
17729 # exit
17730 mov %eax, %edi
17731 call do_exit
17732 ud2 # padding for call trace
17733 CFI_ENDPROC
17734-END(child_rip)
17735+ENDPROC(child_rip)
17736
17737 /*
17738 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
17739@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
17740 RESTORE_REST
17741 testq %rax,%rax
17742 je int_ret_from_sys_call
17743- RESTORE_ARGS
17744 UNFAKE_STACK_FRAME
17745+ pax_force_retaddr
17746 ret
17747 CFI_ENDPROC
17748-END(kernel_execve)
17749+ENDPROC(kernel_execve)
17750
17751 /* Call softirq on interrupt stack. Interrupts are off. */
17752 ENTRY(call_softirq)
17753@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
17754 CFI_DEF_CFA_REGISTER rsp
17755 CFI_ADJUST_CFA_OFFSET -8
17756 decl PER_CPU_VAR(irq_count)
17757+ pax_force_retaddr
17758 ret
17759 CFI_ENDPROC
17760-END(call_softirq)
17761+ENDPROC(call_softirq)
17762
17763 #ifdef CONFIG_XEN
17764 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17765@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17766 decl PER_CPU_VAR(irq_count)
17767 jmp error_exit
17768 CFI_ENDPROC
17769-END(xen_do_hypervisor_callback)
17770+ENDPROC(xen_do_hypervisor_callback)
17771
17772 /*
17773 * Hypervisor uses this for application faults while it executes.
17774@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
17775 SAVE_ALL
17776 jmp error_exit
17777 CFI_ENDPROC
17778-END(xen_failsafe_callback)
17779+ENDPROC(xen_failsafe_callback)
17780
17781 #endif /* CONFIG_XEN */
17782
17783@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
17784 TRACE_IRQS_OFF
17785 testl %ebx,%ebx /* swapgs needed? */
17786 jnz paranoid_restore
17787- testl $3,CS(%rsp)
17788+ testb $3,CS(%rsp)
17789 jnz paranoid_userspace
17790+#ifdef CONFIG_PAX_MEMORY_UDEREF
17791+ pax_exit_kernel
17792+ TRACE_IRQS_IRETQ 0
17793+ SWAPGS_UNSAFE_STACK
17794+ RESTORE_ALL 8
17795+ pax_force_retaddr_bts
17796+ jmp irq_return
17797+#endif
17798 paranoid_swapgs:
17799+#ifdef CONFIG_PAX_MEMORY_UDEREF
17800+ pax_exit_kernel_user
17801+#else
17802+ pax_exit_kernel
17803+#endif
17804 TRACE_IRQS_IRETQ 0
17805 SWAPGS_UNSAFE_STACK
17806 RESTORE_ALL 8
17807 jmp irq_return
17808 paranoid_restore:
17809+ pax_exit_kernel
17810 TRACE_IRQS_IRETQ 0
17811 RESTORE_ALL 8
17812+ pax_force_retaddr_bts
17813 jmp irq_return
17814 paranoid_userspace:
17815 GET_THREAD_INFO(%rcx)
17816@@ -1443,7 +1830,7 @@ paranoid_schedule:
17817 TRACE_IRQS_OFF
17818 jmp paranoid_userspace
17819 CFI_ENDPROC
17820-END(paranoid_exit)
17821+ENDPROC(paranoid_exit)
17822
17823 /*
17824 * Exception entry point. This expects an error code/orig_rax on the stack.
17825@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
17826 movq_cfi r14, R14+8
17827 movq_cfi r15, R15+8
17828 xorl %ebx,%ebx
17829- testl $3,CS+8(%rsp)
17830+ testb $3,CS+8(%rsp)
17831 je error_kernelspace
17832 error_swapgs:
17833 SWAPGS
17834 error_sti:
17835 TRACE_IRQS_OFF
17836+ pax_force_retaddr_bts
17837 ret
17838 CFI_ENDPROC
17839
17840@@ -1497,7 +1885,7 @@ error_kernelspace:
17841 cmpq $gs_change,RIP+8(%rsp)
17842 je error_swapgs
17843 jmp error_sti
17844-END(error_entry)
17845+ENDPROC(error_entry)
17846
17847
17848 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17849@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
17850 jnz retint_careful
17851 jmp retint_swapgs
17852 CFI_ENDPROC
17853-END(error_exit)
17854+ENDPROC(error_exit)
17855
17856
17857 /* runs on exception stack */
17858@@ -1529,6 +1917,16 @@ ENTRY(nmi)
17859 CFI_ADJUST_CFA_OFFSET 15*8
17860 call save_paranoid
17861 DEFAULT_FRAME 0
17862+#ifdef CONFIG_PAX_MEMORY_UDEREF
17863+ testb $3, CS(%rsp)
17864+ jnz 1f
17865+ pax_enter_kernel
17866+ jmp 2f
17867+1: pax_enter_kernel_user
17868+2:
17869+#else
17870+ pax_enter_kernel
17871+#endif
17872 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17873 movq %rsp,%rdi
17874 movq $-1,%rsi
17875@@ -1539,12 +1937,28 @@ ENTRY(nmi)
17876 DISABLE_INTERRUPTS(CLBR_NONE)
17877 testl %ebx,%ebx /* swapgs needed? */
17878 jnz nmi_restore
17879- testl $3,CS(%rsp)
17880+ testb $3,CS(%rsp)
17881 jnz nmi_userspace
17882+#ifdef CONFIG_PAX_MEMORY_UDEREF
17883+ pax_exit_kernel
17884+ SWAPGS_UNSAFE_STACK
17885+ RESTORE_ALL 8
17886+ pax_force_retaddr_bts
17887+ jmp irq_return
17888+#endif
17889 nmi_swapgs:
17890+#ifdef CONFIG_PAX_MEMORY_UDEREF
17891+ pax_exit_kernel_user
17892+#else
17893+ pax_exit_kernel
17894+#endif
17895 SWAPGS_UNSAFE_STACK
17896+ RESTORE_ALL 8
17897+ jmp irq_return
17898 nmi_restore:
17899+ pax_exit_kernel
17900 RESTORE_ALL 8
17901+ pax_force_retaddr_bts
17902 jmp irq_return
17903 nmi_userspace:
17904 GET_THREAD_INFO(%rcx)
17905@@ -1573,14 +1987,14 @@ nmi_schedule:
17906 jmp paranoid_exit
17907 CFI_ENDPROC
17908 #endif
17909-END(nmi)
17910+ENDPROC(nmi)
17911
17912 ENTRY(ignore_sysret)
17913 CFI_STARTPROC
17914 mov $-ENOSYS,%eax
17915 sysret
17916 CFI_ENDPROC
17917-END(ignore_sysret)
17918+ENDPROC(ignore_sysret)
17919
17920 /*
17921 * End of kprobes section
17922diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17923index 9dbb527..7b3615a 100644
17924--- a/arch/x86/kernel/ftrace.c
17925+++ b/arch/x86/kernel/ftrace.c
17926@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
17927 static void *mod_code_newcode; /* holds the text to write to the IP */
17928
17929 static unsigned nmi_wait_count;
17930-static atomic_t nmi_update_count = ATOMIC_INIT(0);
17931+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
17932
17933 int ftrace_arch_read_dyn_info(char *buf, int size)
17934 {
17935@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
17936
17937 r = snprintf(buf, size, "%u %u",
17938 nmi_wait_count,
17939- atomic_read(&nmi_update_count));
17940+ atomic_read_unchecked(&nmi_update_count));
17941 return r;
17942 }
17943
17944@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
17945 {
17946 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
17947 smp_rmb();
17948+ pax_open_kernel();
17949 ftrace_mod_code();
17950- atomic_inc(&nmi_update_count);
17951+ pax_close_kernel();
17952+ atomic_inc_unchecked(&nmi_update_count);
17953 }
17954 /* Must have previous changes seen before executions */
17955 smp_mb();
17956@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
17957
17958
17959
17960-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
17961+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
17962
17963 static unsigned char *ftrace_nop_replace(void)
17964 {
17965@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
17966 {
17967 unsigned char replaced[MCOUNT_INSN_SIZE];
17968
17969+ ip = ktla_ktva(ip);
17970+
17971 /*
17972 * Note: Due to modules and __init, code can
17973 * disappear and change, we need to protect against faulting
17974@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17975 unsigned char old[MCOUNT_INSN_SIZE], *new;
17976 int ret;
17977
17978- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17979+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17980 new = ftrace_call_replace(ip, (unsigned long)func);
17981 ret = ftrace_modify_code(ip, old, new);
17982
17983@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
17984 switch (faulted) {
17985 case 0:
17986 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
17987- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
17988+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
17989 break;
17990 case 1:
17991 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
17992- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
17993+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
17994 break;
17995 case 2:
17996 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
17997- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
17998+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
17999 break;
18000 }
18001
18002@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
18003 {
18004 unsigned char code[MCOUNT_INSN_SIZE];
18005
18006+ ip = ktla_ktva(ip);
18007+
18008 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
18009 return -EFAULT;
18010
18011diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
18012index 4f8e250..df24706 100644
18013--- a/arch/x86/kernel/head32.c
18014+++ b/arch/x86/kernel/head32.c
18015@@ -16,6 +16,7 @@
18016 #include <asm/apic.h>
18017 #include <asm/io_apic.h>
18018 #include <asm/bios_ebda.h>
18019+#include <asm/boot.h>
18020
18021 static void __init i386_default_early_setup(void)
18022 {
18023@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
18024 {
18025 reserve_trampoline_memory();
18026
18027- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18028+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18029
18030 #ifdef CONFIG_BLK_DEV_INITRD
18031 /* Reserve INITRD */
18032diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
18033index 34c3308..6fc4e76 100644
18034--- a/arch/x86/kernel/head_32.S
18035+++ b/arch/x86/kernel/head_32.S
18036@@ -19,10 +19,17 @@
18037 #include <asm/setup.h>
18038 #include <asm/processor-flags.h>
18039 #include <asm/percpu.h>
18040+#include <asm/msr-index.h>
18041
18042 /* Physical address */
18043 #define pa(X) ((X) - __PAGE_OFFSET)
18044
18045+#ifdef CONFIG_PAX_KERNEXEC
18046+#define ta(X) (X)
18047+#else
18048+#define ta(X) ((X) - __PAGE_OFFSET)
18049+#endif
18050+
18051 /*
18052 * References to members of the new_cpu_data structure.
18053 */
18054@@ -52,11 +59,7 @@
18055 * and small than max_low_pfn, otherwise will waste some page table entries
18056 */
18057
18058-#if PTRS_PER_PMD > 1
18059-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
18060-#else
18061-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
18062-#endif
18063+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
18064
18065 /* Enough space to fit pagetables for the low memory linear map */
18066 MAPPING_BEYOND_END = \
18067@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
18068 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18069
18070 /*
18071+ * Real beginning of normal "text" segment
18072+ */
18073+ENTRY(stext)
18074+ENTRY(_stext)
18075+
18076+/*
18077 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
18078 * %esi points to the real-mode code as a 32-bit pointer.
18079 * CS and DS must be 4 GB flat segments, but we don't depend on
18080@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18081 * can.
18082 */
18083 __HEAD
18084+
18085+#ifdef CONFIG_PAX_KERNEXEC
18086+ jmp startup_32
18087+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
18088+.fill PAGE_SIZE-5,1,0xcc
18089+#endif
18090+
18091 ENTRY(startup_32)
18092+ movl pa(stack_start),%ecx
18093+
18094 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
18095 us to not reload segments */
18096 testb $(1<<6), BP_loadflags(%esi)
18097@@ -95,7 +113,60 @@ ENTRY(startup_32)
18098 movl %eax,%es
18099 movl %eax,%fs
18100 movl %eax,%gs
18101+ movl %eax,%ss
18102 2:
18103+ leal -__PAGE_OFFSET(%ecx),%esp
18104+
18105+#ifdef CONFIG_SMP
18106+ movl $pa(cpu_gdt_table),%edi
18107+ movl $__per_cpu_load,%eax
18108+ movw %ax,__KERNEL_PERCPU + 2(%edi)
18109+ rorl $16,%eax
18110+ movb %al,__KERNEL_PERCPU + 4(%edi)
18111+ movb %ah,__KERNEL_PERCPU + 7(%edi)
18112+ movl $__per_cpu_end - 1,%eax
18113+ subl $__per_cpu_start,%eax
18114+ movw %ax,__KERNEL_PERCPU + 0(%edi)
18115+#endif
18116+
18117+#ifdef CONFIG_PAX_MEMORY_UDEREF
18118+ movl $NR_CPUS,%ecx
18119+ movl $pa(cpu_gdt_table),%edi
18120+1:
18121+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18122+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18123+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18124+ addl $PAGE_SIZE_asm,%edi
18125+ loop 1b
18126+#endif
18127+
18128+#ifdef CONFIG_PAX_KERNEXEC
18129+ movl $pa(boot_gdt),%edi
18130+ movl $__LOAD_PHYSICAL_ADDR,%eax
18131+ movw %ax,__BOOT_CS + 2(%edi)
18132+ rorl $16,%eax
18133+ movb %al,__BOOT_CS + 4(%edi)
18134+ movb %ah,__BOOT_CS + 7(%edi)
18135+ rorl $16,%eax
18136+
18137+ ljmp $(__BOOT_CS),$1f
18138+1:
18139+
18140+ movl $NR_CPUS,%ecx
18141+ movl $pa(cpu_gdt_table),%edi
18142+ addl $__PAGE_OFFSET,%eax
18143+1:
18144+ movw %ax,__KERNEL_CS + 2(%edi)
18145+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
18146+ rorl $16,%eax
18147+ movb %al,__KERNEL_CS + 4(%edi)
18148+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
18149+ movb %ah,__KERNEL_CS + 7(%edi)
18150+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
18151+ rorl $16,%eax
18152+ addl $PAGE_SIZE_asm,%edi
18153+ loop 1b
18154+#endif
18155
18156 /*
18157 * Clear BSS first so that there are no surprises...
18158@@ -140,9 +211,7 @@ ENTRY(startup_32)
18159 cmpl $num_subarch_entries, %eax
18160 jae bad_subarch
18161
18162- movl pa(subarch_entries)(,%eax,4), %eax
18163- subl $__PAGE_OFFSET, %eax
18164- jmp *%eax
18165+ jmp *pa(subarch_entries)(,%eax,4)
18166
18167 bad_subarch:
18168 WEAK(lguest_entry)
18169@@ -154,10 +223,10 @@ WEAK(xen_entry)
18170 __INITDATA
18171
18172 subarch_entries:
18173- .long default_entry /* normal x86/PC */
18174- .long lguest_entry /* lguest hypervisor */
18175- .long xen_entry /* Xen hypervisor */
18176- .long default_entry /* Moorestown MID */
18177+ .long ta(default_entry) /* normal x86/PC */
18178+ .long ta(lguest_entry) /* lguest hypervisor */
18179+ .long ta(xen_entry) /* Xen hypervisor */
18180+ .long ta(default_entry) /* Moorestown MID */
18181 num_subarch_entries = (. - subarch_entries) / 4
18182 .previous
18183 #endif /* CONFIG_PARAVIRT */
18184@@ -218,8 +287,11 @@ default_entry:
18185 movl %eax, pa(max_pfn_mapped)
18186
18187 /* Do early initialization of the fixmap area */
18188- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18189- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18190+#ifdef CONFIG_COMPAT_VDSO
18191+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18192+#else
18193+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18194+#endif
18195 #else /* Not PAE */
18196
18197 page_pde_offset = (__PAGE_OFFSET >> 20);
18198@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18199 movl %eax, pa(max_pfn_mapped)
18200
18201 /* Do early initialization of the fixmap area */
18202- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18203- movl %eax,pa(swapper_pg_dir+0xffc)
18204+#ifdef CONFIG_COMPAT_VDSO
18205+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
18206+#else
18207+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
18208+#endif
18209 #endif
18210 jmp 3f
18211 /*
18212@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
18213 movl %eax,%es
18214 movl %eax,%fs
18215 movl %eax,%gs
18216+ movl pa(stack_start),%ecx
18217+ movl %eax,%ss
18218+ leal -__PAGE_OFFSET(%ecx),%esp
18219 #endif /* CONFIG_SMP */
18220 3:
18221
18222@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
18223 orl %edx,%eax
18224 movl %eax,%cr4
18225
18226+#ifdef CONFIG_X86_PAE
18227 btl $5, %eax # check if PAE is enabled
18228 jnc 6f
18229
18230@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
18231 cpuid
18232 cmpl $0x80000000, %eax
18233 jbe 6f
18234+
18235+ /* Clear bogus XD_DISABLE bits */
18236+ call verify_cpu
18237+
18238 mov $0x80000001, %eax
18239 cpuid
18240 /* Execute Disable bit supported? */
18241@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
18242 jnc 6f
18243
18244 /* Setup EFER (Extended Feature Enable Register) */
18245- movl $0xc0000080, %ecx
18246+ movl $MSR_EFER, %ecx
18247 rdmsr
18248
18249 btsl $11, %eax
18250 /* Make changes effective */
18251 wrmsr
18252
18253+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18254+ movl $1,pa(nx_enabled)
18255+#endif
18256+
18257 6:
18258
18259 /*
18260@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
18261 movl %eax,%cr0 /* ..and set paging (PG) bit */
18262 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
18263 1:
18264- /* Set up the stack pointer */
18265- lss stack_start,%esp
18266+ /* Shift the stack pointer to a virtual address */
18267+ addl $__PAGE_OFFSET, %esp
18268
18269 /*
18270 * Initialize eflags. Some BIOS's leave bits like NT set. This would
18271@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
18272
18273 #ifdef CONFIG_SMP
18274 cmpb $0, ready
18275- jz 1f /* Initial CPU cleans BSS */
18276- jmp checkCPUtype
18277-1:
18278+ jnz checkCPUtype
18279 #endif /* CONFIG_SMP */
18280
18281 /*
18282@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
18283 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18284 movl %eax,%ss # after changing gdt.
18285
18286- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18287+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18288 movl %eax,%ds
18289 movl %eax,%es
18290
18291@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
18292 */
18293 cmpb $0,ready
18294 jne 1f
18295- movl $per_cpu__gdt_page,%eax
18296+ movl $cpu_gdt_table,%eax
18297 movl $per_cpu__stack_canary,%ecx
18298+#ifdef CONFIG_SMP
18299+ addl $__per_cpu_load,%ecx
18300+#endif
18301 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18302 shrl $16, %ecx
18303 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18304 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
18305 1:
18306-#endif
18307 movl $(__KERNEL_STACK_CANARY),%eax
18308+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18309+ movl $(__USER_DS),%eax
18310+#else
18311+ xorl %eax,%eax
18312+#endif
18313 movl %eax,%gs
18314
18315 xorl %eax,%eax # Clear LDT
18316@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
18317
18318 cld # gcc2 wants the direction flag cleared at all times
18319 pushl $0 # fake return address for unwinder
18320-#ifdef CONFIG_SMP
18321- movb ready, %cl
18322 movb $1, ready
18323- cmpb $0,%cl # the first CPU calls start_kernel
18324- je 1f
18325- movl (stack_start), %esp
18326-1:
18327-#endif /* CONFIG_SMP */
18328 jmp *(initial_code)
18329
18330 /*
18331@@ -546,22 +631,22 @@ early_page_fault:
18332 jmp early_fault
18333
18334 early_fault:
18335- cld
18336 #ifdef CONFIG_PRINTK
18337+ cmpl $1,%ss:early_recursion_flag
18338+ je hlt_loop
18339+ incl %ss:early_recursion_flag
18340+ cld
18341 pusha
18342 movl $(__KERNEL_DS),%eax
18343 movl %eax,%ds
18344 movl %eax,%es
18345- cmpl $2,early_recursion_flag
18346- je hlt_loop
18347- incl early_recursion_flag
18348 movl %cr2,%eax
18349 pushl %eax
18350 pushl %edx /* trapno */
18351 pushl $fault_msg
18352 call printk
18353+; call dump_stack
18354 #endif
18355- call dump_stack
18356 hlt_loop:
18357 hlt
18358 jmp hlt_loop
18359@@ -569,8 +654,11 @@ hlt_loop:
18360 /* This is the default interrupt "handler" :-) */
18361 ALIGN
18362 ignore_int:
18363- cld
18364 #ifdef CONFIG_PRINTK
18365+ cmpl $2,%ss:early_recursion_flag
18366+ je hlt_loop
18367+ incl %ss:early_recursion_flag
18368+ cld
18369 pushl %eax
18370 pushl %ecx
18371 pushl %edx
18372@@ -579,9 +667,6 @@ ignore_int:
18373 movl $(__KERNEL_DS),%eax
18374 movl %eax,%ds
18375 movl %eax,%es
18376- cmpl $2,early_recursion_flag
18377- je hlt_loop
18378- incl early_recursion_flag
18379 pushl 16(%esp)
18380 pushl 24(%esp)
18381 pushl 32(%esp)
18382@@ -600,6 +685,8 @@ ignore_int:
18383 #endif
18384 iret
18385
18386+#include "verify_cpu.S"
18387+
18388 __REFDATA
18389 .align 4
18390 ENTRY(initial_code)
18391@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
18392 /*
18393 * BSS section
18394 */
18395-__PAGE_ALIGNED_BSS
18396- .align PAGE_SIZE_asm
18397 #ifdef CONFIG_X86_PAE
18398+.section .swapper_pg_pmd,"a",@progbits
18399 swapper_pg_pmd:
18400 .fill 1024*KPMDS,4,0
18401 #else
18402+.section .swapper_pg_dir,"a",@progbits
18403 ENTRY(swapper_pg_dir)
18404 .fill 1024,4,0
18405 #endif
18406+.section .swapper_pg_fixmap,"a",@progbits
18407 swapper_pg_fixmap:
18408 .fill 1024,4,0
18409 #ifdef CONFIG_X86_TRAMPOLINE
18410+.section .trampoline_pg_dir,"a",@progbits
18411 ENTRY(trampoline_pg_dir)
18412+#ifdef CONFIG_X86_PAE
18413+ .fill 4,8,0
18414+#else
18415 .fill 1024,4,0
18416 #endif
18417+#endif
18418+
18419+.section .empty_zero_page,"a",@progbits
18420 ENTRY(empty_zero_page)
18421 .fill 4096,1,0
18422
18423 /*
18424+ * The IDT has to be page-aligned to simplify the Pentium
18425+ * F0 0F bug workaround.. We have a special link segment
18426+ * for this.
18427+ */
18428+.section .idt,"a",@progbits
18429+ENTRY(idt_table)
18430+ .fill 256,8,0
18431+
18432+/*
18433 * This starts the data section.
18434 */
18435 #ifdef CONFIG_X86_PAE
18436-__PAGE_ALIGNED_DATA
18437- /* Page-aligned for the benefit of paravirt? */
18438- .align PAGE_SIZE_asm
18439+.section .swapper_pg_dir,"a",@progbits
18440+
18441 ENTRY(swapper_pg_dir)
18442 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18443 # if KPMDS == 3
18444@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
18445 # error "Kernel PMDs should be 1, 2 or 3"
18446 # endif
18447 .align PAGE_SIZE_asm /* needs to be page-sized too */
18448+
18449+#ifdef CONFIG_PAX_PER_CPU_PGD
18450+ENTRY(cpu_pgd)
18451+ .rept NR_CPUS
18452+ .fill 4,8,0
18453+ .endr
18454+#endif
18455+
18456 #endif
18457
18458 .data
18459+.balign 4
18460 ENTRY(stack_start)
18461- .long init_thread_union+THREAD_SIZE
18462- .long __BOOT_DS
18463+ .long init_thread_union+THREAD_SIZE-8
18464
18465 ready: .byte 0
18466
18467+.section .rodata,"a",@progbits
18468 early_recursion_flag:
18469 .long 0
18470
18471@@ -697,7 +809,7 @@ fault_msg:
18472 .word 0 # 32 bit align gdt_desc.address
18473 boot_gdt_descr:
18474 .word __BOOT_DS+7
18475- .long boot_gdt - __PAGE_OFFSET
18476+ .long pa(boot_gdt)
18477
18478 .word 0 # 32-bit align idt_desc.address
18479 idt_descr:
18480@@ -708,7 +820,7 @@ idt_descr:
18481 .word 0 # 32 bit align gdt_desc.address
18482 ENTRY(early_gdt_descr)
18483 .word GDT_ENTRIES*8-1
18484- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
18485+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18486
18487 /*
18488 * The boot_gdt must mirror the equivalent in setup.S and is
18489@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
18490 .align L1_CACHE_BYTES
18491 ENTRY(boot_gdt)
18492 .fill GDT_ENTRY_BOOT_CS,8,0
18493- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18494- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18495+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18496+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18497+
18498+ .align PAGE_SIZE_asm
18499+ENTRY(cpu_gdt_table)
18500+ .rept NR_CPUS
18501+ .quad 0x0000000000000000 /* NULL descriptor */
18502+ .quad 0x0000000000000000 /* 0x0b reserved */
18503+ .quad 0x0000000000000000 /* 0x13 reserved */
18504+ .quad 0x0000000000000000 /* 0x1b reserved */
18505+
18506+#ifdef CONFIG_PAX_KERNEXEC
18507+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18508+#else
18509+ .quad 0x0000000000000000 /* 0x20 unused */
18510+#endif
18511+
18512+ .quad 0x0000000000000000 /* 0x28 unused */
18513+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18514+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18515+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18516+ .quad 0x0000000000000000 /* 0x4b reserved */
18517+ .quad 0x0000000000000000 /* 0x53 reserved */
18518+ .quad 0x0000000000000000 /* 0x5b reserved */
18519+
18520+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18521+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18522+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18523+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18524+
18525+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18526+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18527+
18528+ /*
18529+ * Segments used for calling PnP BIOS have byte granularity.
18530+ * The code segments and data segments have fixed 64k limits,
18531+ * the transfer segment sizes are set at run time.
18532+ */
18533+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18534+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18535+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18536+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18537+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18538+
18539+ /*
18540+ * The APM segments have byte granularity and their bases
18541+ * are set at run time. All have 64k limits.
18542+ */
18543+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18544+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18545+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18546+
18547+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18548+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18549+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18550+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18551+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18552+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18553+
18554+ /* Be sure this is zeroed to avoid false validations in Xen */
18555+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18556+ .endr
18557diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18558index 780cd92..758b2a6 100644
18559--- a/arch/x86/kernel/head_64.S
18560+++ b/arch/x86/kernel/head_64.S
18561@@ -19,6 +19,8 @@
18562 #include <asm/cache.h>
18563 #include <asm/processor-flags.h>
18564 #include <asm/percpu.h>
18565+#include <asm/cpufeature.h>
18566+#include <asm/alternative-asm.h>
18567
18568 #ifdef CONFIG_PARAVIRT
18569 #include <asm/asm-offsets.h>
18570@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18571 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18572 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18573 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18574+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18575+L3_VMALLOC_START = pud_index(VMALLOC_START)
18576+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18577+L3_VMALLOC_END = pud_index(VMALLOC_END)
18578+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18579+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18580
18581 .text
18582 __HEAD
18583@@ -85,35 +93,23 @@ startup_64:
18584 */
18585 addq %rbp, init_level4_pgt + 0(%rip)
18586 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18587+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18588+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18589+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18590 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18591
18592 addq %rbp, level3_ident_pgt + 0(%rip)
18593+#ifndef CONFIG_XEN
18594+ addq %rbp, level3_ident_pgt + 8(%rip)
18595+#endif
18596
18597- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18598- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18599+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18600+
18601+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18602+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18603
18604 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18605-
18606- /* Add an Identity mapping if I am above 1G */
18607- leaq _text(%rip), %rdi
18608- andq $PMD_PAGE_MASK, %rdi
18609-
18610- movq %rdi, %rax
18611- shrq $PUD_SHIFT, %rax
18612- andq $(PTRS_PER_PUD - 1), %rax
18613- jz ident_complete
18614-
18615- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18616- leaq level3_ident_pgt(%rip), %rbx
18617- movq %rdx, 0(%rbx, %rax, 8)
18618-
18619- movq %rdi, %rax
18620- shrq $PMD_SHIFT, %rax
18621- andq $(PTRS_PER_PMD - 1), %rax
18622- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18623- leaq level2_spare_pgt(%rip), %rbx
18624- movq %rdx, 0(%rbx, %rax, 8)
18625-ident_complete:
18626+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18627
18628 /*
18629 * Fixup the kernel text+data virtual addresses. Note that
18630@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
18631 * after the boot processor executes this code.
18632 */
18633
18634- /* Enable PAE mode and PGE */
18635- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18636+ /* Enable PAE mode and PSE/PGE */
18637+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18638 movq %rax, %cr4
18639
18640 /* Setup early boot stage 4 level pagetables. */
18641@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
18642 movl $MSR_EFER, %ecx
18643 rdmsr
18644 btsl $_EFER_SCE, %eax /* Enable System Call */
18645- btl $20,%edi /* No Execute supported? */
18646+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18647 jnc 1f
18648 btsl $_EFER_NX, %eax
18649+ leaq init_level4_pgt(%rip), %rdi
18650+#ifndef CONFIG_EFI
18651+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18652+#endif
18653+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18654+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18655+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18656 1: wrmsr /* Make changes effective */
18657
18658 /* Setup cr0 */
18659@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
18660 * jump. In addition we need to ensure %cs is set so we make this
18661 * a far return.
18662 */
18663+ pax_set_fptr_mask
18664 movq initial_code(%rip),%rax
18665 pushq $0 # fake return address to stop unwinder
18666 pushq $__KERNEL_CS # set correct cs
18667@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
18668 .quad x86_64_start_kernel
18669 ENTRY(initial_gs)
18670 .quad INIT_PER_CPU_VAR(irq_stack_union)
18671- __FINITDATA
18672
18673 ENTRY(stack_start)
18674 .quad init_thread_union+THREAD_SIZE-8
18675 .word 0
18676+ __FINITDATA
18677
18678 bad_address:
18679 jmp bad_address
18680
18681- .section ".init.text","ax"
18682+ __INIT
18683 #ifdef CONFIG_EARLY_PRINTK
18684 .globl early_idt_handlers
18685 early_idt_handlers:
18686@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
18687 #endif /* EARLY_PRINTK */
18688 1: hlt
18689 jmp 1b
18690+ .previous
18691
18692 #ifdef CONFIG_EARLY_PRINTK
18693+ __INITDATA
18694 early_recursion_flag:
18695 .long 0
18696+ .previous
18697
18698+ .section .rodata,"a",@progbits
18699 early_idt_msg:
18700 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18701 early_idt_ripmsg:
18702 .asciz "RIP %s\n"
18703+ .previous
18704 #endif /* CONFIG_EARLY_PRINTK */
18705- .previous
18706
18707+ .section .rodata,"a",@progbits
18708 #define NEXT_PAGE(name) \
18709 .balign PAGE_SIZE; \
18710 ENTRY(name)
18711@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
18712 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18713 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18714 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18715+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18716+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18717+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18718+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18719+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18720+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18721 .org init_level4_pgt + L4_START_KERNEL*8, 0
18722 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18723 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18724
18725+#ifdef CONFIG_PAX_PER_CPU_PGD
18726+NEXT_PAGE(cpu_pgd)
18727+ .rept NR_CPUS
18728+ .fill 512,8,0
18729+ .endr
18730+#endif
18731+
18732 NEXT_PAGE(level3_ident_pgt)
18733 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18734+#ifdef CONFIG_XEN
18735 .fill 511,8,0
18736+#else
18737+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18738+ .fill 510,8,0
18739+#endif
18740+
18741+NEXT_PAGE(level3_vmalloc_start_pgt)
18742+ .fill 512,8,0
18743+
18744+NEXT_PAGE(level3_vmalloc_end_pgt)
18745+ .fill 512,8,0
18746+
18747+NEXT_PAGE(level3_vmemmap_pgt)
18748+ .fill L3_VMEMMAP_START,8,0
18749+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18750
18751 NEXT_PAGE(level3_kernel_pgt)
18752 .fill L3_START_KERNEL,8,0
18753@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
18754 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18755 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18756
18757+NEXT_PAGE(level2_vmemmap_pgt)
18758+ .fill 512,8,0
18759+
18760 NEXT_PAGE(level2_fixmap_pgt)
18761- .fill 506,8,0
18762- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18763- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18764- .fill 5,8,0
18765+ .fill 507,8,0
18766+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18767+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18768+ .fill 4,8,0
18769
18770-NEXT_PAGE(level1_fixmap_pgt)
18771+NEXT_PAGE(level1_vsyscall_pgt)
18772 .fill 512,8,0
18773
18774-NEXT_PAGE(level2_ident_pgt)
18775- /* Since I easily can, map the first 1G.
18776+ /* Since I easily can, map the first 2G.
18777 * Don't set NX because code runs from these pages.
18778 */
18779- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18780+NEXT_PAGE(level2_ident_pgt)
18781+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18782
18783 NEXT_PAGE(level2_kernel_pgt)
18784 /*
18785@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
18786 * If you want to increase this then increase MODULES_VADDR
18787 * too.)
18788 */
18789- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18790- KERNEL_IMAGE_SIZE/PMD_SIZE)
18791-
18792-NEXT_PAGE(level2_spare_pgt)
18793- .fill 512, 8, 0
18794+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18795
18796 #undef PMDS
18797 #undef NEXT_PAGE
18798
18799- .data
18800+ .align PAGE_SIZE
18801+ENTRY(cpu_gdt_table)
18802+ .rept NR_CPUS
18803+ .quad 0x0000000000000000 /* NULL descriptor */
18804+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18805+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18806+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18807+ .quad 0x00cffb000000ffff /* __USER32_CS */
18808+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18809+ .quad 0x00affb000000ffff /* __USER_CS */
18810+
18811+#ifdef CONFIG_PAX_KERNEXEC
18812+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18813+#else
18814+ .quad 0x0 /* unused */
18815+#endif
18816+
18817+ .quad 0,0 /* TSS */
18818+ .quad 0,0 /* LDT */
18819+ .quad 0,0,0 /* three TLS descriptors */
18820+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18821+ /* asm/segment.h:GDT_ENTRIES must match this */
18822+
18823+ /* zero the remaining page */
18824+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18825+ .endr
18826+
18827 .align 16
18828 .globl early_gdt_descr
18829 early_gdt_descr:
18830 .word GDT_ENTRIES*8-1
18831 early_gdt_descr_base:
18832- .quad INIT_PER_CPU_VAR(gdt_page)
18833+ .quad cpu_gdt_table
18834
18835 ENTRY(phys_base)
18836 /* This must match the first entry in level2_kernel_pgt */
18837 .quad 0x0000000000000000
18838
18839 #include "../../x86/xen/xen-head.S"
18840-
18841- .section .bss, "aw", @nobits
18842+
18843+ .section .rodata,"a",@progbits
18844 .align L1_CACHE_BYTES
18845 ENTRY(idt_table)
18846- .skip IDT_ENTRIES * 16
18847+ .fill 512,8,0
18848
18849 __PAGE_ALIGNED_BSS
18850 .align PAGE_SIZE
18851diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18852index 9c3bd4a..e1d9b35 100644
18853--- a/arch/x86/kernel/i386_ksyms_32.c
18854+++ b/arch/x86/kernel/i386_ksyms_32.c
18855@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18856 EXPORT_SYMBOL(cmpxchg8b_emu);
18857 #endif
18858
18859+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18860+
18861 /* Networking helper routines. */
18862 EXPORT_SYMBOL(csum_partial_copy_generic);
18863+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18864+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18865
18866 EXPORT_SYMBOL(__get_user_1);
18867 EXPORT_SYMBOL(__get_user_2);
18868@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18869
18870 EXPORT_SYMBOL(csum_partial);
18871 EXPORT_SYMBOL(empty_zero_page);
18872+
18873+#ifdef CONFIG_PAX_KERNEXEC
18874+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18875+#endif
18876diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18877index f2f8540..d845509 100644
18878--- a/arch/x86/kernel/i387.c
18879+++ b/arch/x86/kernel/i387.c
18880@@ -176,6 +176,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
18881
18882 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18883 unsigned int pos, unsigned int count,
18884+ void *kbuf, void __user *ubuf) __size_overflow(4);
18885+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18886+ unsigned int pos, unsigned int count,
18887 void *kbuf, void __user *ubuf)
18888 {
18889 int ret;
18890@@ -193,6 +196,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18891
18892 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18893 unsigned int pos, unsigned int count,
18894+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
18895+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18896+ unsigned int pos, unsigned int count,
18897 const void *kbuf, const void __user *ubuf)
18898 {
18899 int ret;
18900@@ -365,6 +371,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
18901
18902 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18903 unsigned int pos, unsigned int count,
18904+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
18905+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18906+ unsigned int pos, unsigned int count,
18907 void *kbuf, void __user *ubuf)
18908 {
18909 struct user_i387_ia32_struct env;
18910@@ -395,6 +404,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18911
18912 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18913 unsigned int pos, unsigned int count,
18914+ const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
18915+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18916+ unsigned int pos, unsigned int count,
18917 const void *kbuf, const void __user *ubuf)
18918 {
18919 struct user_i387_ia32_struct env;
18920@@ -540,6 +552,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
18921 }
18922
18923 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18924+ unsigned int size) __size_overflow(2);
18925+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18926 unsigned int size)
18927 {
18928 struct task_struct *tsk = current;
18929diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18930index df89102..a244320 100644
18931--- a/arch/x86/kernel/i8259.c
18932+++ b/arch/x86/kernel/i8259.c
18933@@ -208,7 +208,7 @@ spurious_8259A_irq:
18934 "spurious 8259A interrupt: IRQ%d.\n", irq);
18935 spurious_irq_mask |= irqmask;
18936 }
18937- atomic_inc(&irq_err_count);
18938+ atomic_inc_unchecked(&irq_err_count);
18939 /*
18940 * Theoretically we do not have to handle this IRQ,
18941 * but in Linux this does not cause problems and is
18942diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
18943index 3a54dcb..1c22348 100644
18944--- a/arch/x86/kernel/init_task.c
18945+++ b/arch/x86/kernel/init_task.c
18946@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
18947 * way process stacks are handled. This is done by having a special
18948 * "init_task" linker map entry..
18949 */
18950-union thread_union init_thread_union __init_task_data =
18951- { INIT_THREAD_INFO(init_task) };
18952+union thread_union init_thread_union __init_task_data;
18953
18954 /*
18955 * Initial task structure.
18956@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
18957 * section. Since TSS's are completely CPU-local, we want them
18958 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18959 */
18960-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18961-
18962+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18963+EXPORT_SYMBOL(init_tss);
18964diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18965index 99c4d30..74c84e9 100644
18966--- a/arch/x86/kernel/ioport.c
18967+++ b/arch/x86/kernel/ioport.c
18968@@ -6,6 +6,7 @@
18969 #include <linux/sched.h>
18970 #include <linux/kernel.h>
18971 #include <linux/capability.h>
18972+#include <linux/security.h>
18973 #include <linux/errno.h>
18974 #include <linux/types.h>
18975 #include <linux/ioport.h>
18976@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18977
18978 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18979 return -EINVAL;
18980+#ifdef CONFIG_GRKERNSEC_IO
18981+ if (turn_on && grsec_disable_privio) {
18982+ gr_handle_ioperm();
18983+ return -EPERM;
18984+ }
18985+#endif
18986 if (turn_on && !capable(CAP_SYS_RAWIO))
18987 return -EPERM;
18988
18989@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18990 * because the ->io_bitmap_max value must match the bitmap
18991 * contents:
18992 */
18993- tss = &per_cpu(init_tss, get_cpu());
18994+ tss = init_tss + get_cpu();
18995
18996 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
18997
18998@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
18999 return -EINVAL;
19000 /* Trying to gain more privileges? */
19001 if (level > old) {
19002+#ifdef CONFIG_GRKERNSEC_IO
19003+ if (grsec_disable_privio) {
19004+ gr_handle_iopl();
19005+ return -EPERM;
19006+ }
19007+#endif
19008 if (!capable(CAP_SYS_RAWIO))
19009 return -EPERM;
19010 }
19011diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
19012index 04bbd52..83a07d9 100644
19013--- a/arch/x86/kernel/irq.c
19014+++ b/arch/x86/kernel/irq.c
19015@@ -15,7 +15,7 @@
19016 #include <asm/mce.h>
19017 #include <asm/hw_irq.h>
19018
19019-atomic_t irq_err_count;
19020+atomic_unchecked_t irq_err_count;
19021
19022 /* Function pointer for generic interrupt vector handling */
19023 void (*generic_interrupt_extension)(void) = NULL;
19024@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
19025 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
19026 seq_printf(p, " Machine check polls\n");
19027 #endif
19028- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
19029+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
19030 #if defined(CONFIG_X86_IO_APIC)
19031- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
19032+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
19033 #endif
19034 return 0;
19035 }
19036@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
19037
19038 u64 arch_irq_stat(void)
19039 {
19040- u64 sum = atomic_read(&irq_err_count);
19041+ u64 sum = atomic_read_unchecked(&irq_err_count);
19042
19043 #ifdef CONFIG_X86_IO_APIC
19044- sum += atomic_read(&irq_mis_count);
19045+ sum += atomic_read_unchecked(&irq_mis_count);
19046 #endif
19047 return sum;
19048 }
19049diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
19050index 7d35d0f..03f1d52 100644
19051--- a/arch/x86/kernel/irq_32.c
19052+++ b/arch/x86/kernel/irq_32.c
19053@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
19054 __asm__ __volatile__("andl %%esp,%0" :
19055 "=r" (sp) : "0" (THREAD_SIZE - 1));
19056
19057- return sp < (sizeof(struct thread_info) + STACK_WARN);
19058+ return sp < STACK_WARN;
19059 }
19060
19061 static void print_stack_overflow(void)
19062@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
19063 * per-CPU IRQ handling contexts (thread information and stack)
19064 */
19065 union irq_ctx {
19066- struct thread_info tinfo;
19067- u32 stack[THREAD_SIZE/sizeof(u32)];
19068-} __attribute__((aligned(PAGE_SIZE)));
19069+ unsigned long previous_esp;
19070+ u32 stack[THREAD_SIZE/sizeof(u32)];
19071+} __attribute__((aligned(THREAD_SIZE)));
19072
19073 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
19074 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
19075@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
19076 static inline int
19077 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19078 {
19079- union irq_ctx *curctx, *irqctx;
19080+ union irq_ctx *irqctx;
19081 u32 *isp, arg1, arg2;
19082
19083- curctx = (union irq_ctx *) current_thread_info();
19084 irqctx = __get_cpu_var(hardirq_ctx);
19085
19086 /*
19087@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19088 * handler) we can't do that and just have to keep using the
19089 * current stack (which is the irq stack already after all)
19090 */
19091- if (unlikely(curctx == irqctx))
19092+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
19093 return 0;
19094
19095 /* build the stack frame on the IRQ stack */
19096- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19097- irqctx->tinfo.task = curctx->tinfo.task;
19098- irqctx->tinfo.previous_esp = current_stack_pointer;
19099+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19100+ irqctx->previous_esp = current_stack_pointer;
19101
19102- /*
19103- * Copy the softirq bits in preempt_count so that the
19104- * softirq checks work in the hardirq context.
19105- */
19106- irqctx->tinfo.preempt_count =
19107- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
19108- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
19109+#ifdef CONFIG_PAX_MEMORY_UDEREF
19110+ __set_fs(MAKE_MM_SEG(0));
19111+#endif
19112
19113 if (unlikely(overflow))
19114 call_on_stack(print_stack_overflow, isp);
19115@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19116 : "0" (irq), "1" (desc), "2" (isp),
19117 "D" (desc->handle_irq)
19118 : "memory", "cc", "ecx");
19119+
19120+#ifdef CONFIG_PAX_MEMORY_UDEREF
19121+ __set_fs(current_thread_info()->addr_limit);
19122+#endif
19123+
19124 return 1;
19125 }
19126
19127@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19128 */
19129 void __cpuinit irq_ctx_init(int cpu)
19130 {
19131- union irq_ctx *irqctx;
19132-
19133 if (per_cpu(hardirq_ctx, cpu))
19134 return;
19135
19136- irqctx = &per_cpu(hardirq_stack, cpu);
19137- irqctx->tinfo.task = NULL;
19138- irqctx->tinfo.exec_domain = NULL;
19139- irqctx->tinfo.cpu = cpu;
19140- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
19141- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19142-
19143- per_cpu(hardirq_ctx, cpu) = irqctx;
19144-
19145- irqctx = &per_cpu(softirq_stack, cpu);
19146- irqctx->tinfo.task = NULL;
19147- irqctx->tinfo.exec_domain = NULL;
19148- irqctx->tinfo.cpu = cpu;
19149- irqctx->tinfo.preempt_count = 0;
19150- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19151-
19152- per_cpu(softirq_ctx, cpu) = irqctx;
19153+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
19154+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
19155
19156 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
19157 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
19158@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
19159 asmlinkage void do_softirq(void)
19160 {
19161 unsigned long flags;
19162- struct thread_info *curctx;
19163 union irq_ctx *irqctx;
19164 u32 *isp;
19165
19166@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
19167 local_irq_save(flags);
19168
19169 if (local_softirq_pending()) {
19170- curctx = current_thread_info();
19171 irqctx = __get_cpu_var(softirq_ctx);
19172- irqctx->tinfo.task = curctx->task;
19173- irqctx->tinfo.previous_esp = current_stack_pointer;
19174+ irqctx->previous_esp = current_stack_pointer;
19175
19176 /* build the stack frame on the softirq stack */
19177- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19178+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19179+
19180+#ifdef CONFIG_PAX_MEMORY_UDEREF
19181+ __set_fs(MAKE_MM_SEG(0));
19182+#endif
19183
19184 call_on_stack(__do_softirq, isp);
19185+
19186+#ifdef CONFIG_PAX_MEMORY_UDEREF
19187+ __set_fs(current_thread_info()->addr_limit);
19188+#endif
19189+
19190 /*
19191 * Shouldnt happen, we returned above if in_interrupt():
19192 */
19193diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
19194index 8d82a77..0baf312 100644
19195--- a/arch/x86/kernel/kgdb.c
19196+++ b/arch/x86/kernel/kgdb.c
19197@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19198
19199 /* clear the trace bit */
19200 linux_regs->flags &= ~X86_EFLAGS_TF;
19201- atomic_set(&kgdb_cpu_doing_single_step, -1);
19202+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19203
19204 /* set the trace bit if we're stepping */
19205 if (remcomInBuffer[0] == 's') {
19206 linux_regs->flags |= X86_EFLAGS_TF;
19207 kgdb_single_step = 1;
19208- atomic_set(&kgdb_cpu_doing_single_step,
19209+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19210 raw_smp_processor_id());
19211 }
19212
19213@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19214 break;
19215
19216 case DIE_DEBUG:
19217- if (atomic_read(&kgdb_cpu_doing_single_step) ==
19218+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
19219 raw_smp_processor_id()) {
19220 if (user_mode(regs))
19221 return single_step_cont(regs, args);
19222@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
19223 return instruction_pointer(regs);
19224 }
19225
19226-struct kgdb_arch arch_kgdb_ops = {
19227+const struct kgdb_arch arch_kgdb_ops = {
19228 /* Breakpoint instruction: */
19229 .gdb_bpt_instr = { 0xcc },
19230 .flags = KGDB_HW_BREAKPOINT,
19231diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19232index 7a67820..70ea187 100644
19233--- a/arch/x86/kernel/kprobes.c
19234+++ b/arch/x86/kernel/kprobes.c
19235@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
19236 char op;
19237 s32 raddr;
19238 } __attribute__((packed)) * jop;
19239- jop = (struct __arch_jmp_op *)from;
19240+
19241+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
19242+
19243+ pax_open_kernel();
19244 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
19245 jop->op = RELATIVEJUMP_INSTRUCTION;
19246+ pax_close_kernel();
19247 }
19248
19249 /*
19250@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
19251 kprobe_opcode_t opcode;
19252 kprobe_opcode_t *orig_opcodes = opcodes;
19253
19254- if (search_exception_tables((unsigned long)opcodes))
19255+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19256 return 0; /* Page fault may occur on this address. */
19257
19258 retry:
19259@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
19260 disp = (u8 *) p->addr + *((s32 *) insn) -
19261 (u8 *) p->ainsn.insn;
19262 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
19263+ pax_open_kernel();
19264 *(s32 *)insn = (s32) disp;
19265+ pax_close_kernel();
19266 }
19267 }
19268 #endif
19269@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
19270
19271 static void __kprobes arch_copy_kprobe(struct kprobe *p)
19272 {
19273- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19274+ pax_open_kernel();
19275+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19276+ pax_close_kernel();
19277
19278 fix_riprel(p);
19279
19280- if (can_boost(p->addr))
19281+ if (can_boost(ktla_ktva(p->addr)))
19282 p->ainsn.boostable = 0;
19283 else
19284 p->ainsn.boostable = -1;
19285
19286- p->opcode = *p->addr;
19287+ p->opcode = *(ktla_ktva(p->addr));
19288 }
19289
19290 int __kprobes arch_prepare_kprobe(struct kprobe *p)
19291@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
19292 if (p->opcode == BREAKPOINT_INSTRUCTION)
19293 regs->ip = (unsigned long)p->addr;
19294 else
19295- regs->ip = (unsigned long)p->ainsn.insn;
19296+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19297 }
19298
19299 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
19300@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
19301 if (p->ainsn.boostable == 1 && !p->post_handler) {
19302 /* Boost up -- we can execute copied instructions directly */
19303 reset_current_kprobe();
19304- regs->ip = (unsigned long)p->ainsn.insn;
19305+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19306 preempt_enable_no_resched();
19307 return;
19308 }
19309@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19310 struct kprobe_ctlblk *kcb;
19311
19312 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
19313- if (*addr != BREAKPOINT_INSTRUCTION) {
19314+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19315 /*
19316 * The breakpoint instruction was removed right
19317 * after we hit it. Another cpu has removed
19318@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19319 /* Skip orig_ax, ip, cs */
19320 " addq $24, %rsp\n"
19321 " popfq\n"
19322+#ifdef KERNEXEC_PLUGIN
19323+ " btsq $63,(%rsp)\n"
19324+#endif
19325 #else
19326 " pushf\n"
19327 /*
19328@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
19329 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19330 {
19331 unsigned long *tos = stack_addr(regs);
19332- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19333+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19334 unsigned long orig_ip = (unsigned long)p->addr;
19335 kprobe_opcode_t *insn = p->ainsn.insn;
19336
19337@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
19338 struct die_args *args = data;
19339 int ret = NOTIFY_DONE;
19340
19341- if (args->regs && user_mode_vm(args->regs))
19342+ if (args->regs && user_mode(args->regs))
19343 return ret;
19344
19345 switch (val) {
19346diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19347index 63b0ec8..6d92227 100644
19348--- a/arch/x86/kernel/kvm.c
19349+++ b/arch/x86/kernel/kvm.c
19350@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
19351 pv_mmu_ops.set_pud = kvm_set_pud;
19352 #if PAGETABLE_LEVELS == 4
19353 pv_mmu_ops.set_pgd = kvm_set_pgd;
19354+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
19355 #endif
19356 #endif
19357 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
19358diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19359index ec6ef60..d784780 100644
19360--- a/arch/x86/kernel/ldt.c
19361+++ b/arch/x86/kernel/ldt.c
19362@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19363 if (reload) {
19364 #ifdef CONFIG_SMP
19365 preempt_disable();
19366- load_LDT(pc);
19367+ load_LDT_nolock(pc);
19368 if (!cpumask_equal(mm_cpumask(current->mm),
19369 cpumask_of(smp_processor_id())))
19370 smp_call_function(flush_ldt, current->mm, 1);
19371 preempt_enable();
19372 #else
19373- load_LDT(pc);
19374+ load_LDT_nolock(pc);
19375 #endif
19376 }
19377 if (oldsize) {
19378@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19379 return err;
19380
19381 for (i = 0; i < old->size; i++)
19382- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19383+ write_ldt_entry(new->ldt, i, old->ldt + i);
19384 return 0;
19385 }
19386
19387@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19388 retval = copy_ldt(&mm->context, &old_mm->context);
19389 mutex_unlock(&old_mm->context.lock);
19390 }
19391+
19392+ if (tsk == current) {
19393+ mm->context.vdso = 0;
19394+
19395+#ifdef CONFIG_X86_32
19396+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19397+ mm->context.user_cs_base = 0UL;
19398+ mm->context.user_cs_limit = ~0UL;
19399+
19400+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19401+ cpus_clear(mm->context.cpu_user_cs_mask);
19402+#endif
19403+
19404+#endif
19405+#endif
19406+
19407+ }
19408+
19409 return retval;
19410 }
19411
19412@@ -140,6 +158,7 @@ void destroy_context(struct mm_struct *mm)
19413 }
19414 }
19415
19416+static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
19417 static int read_ldt(void __user *ptr, unsigned long bytecount)
19418 {
19419 int err;
19420@@ -229,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19421 }
19422 }
19423
19424+#ifdef CONFIG_PAX_SEGMEXEC
19425+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19426+ error = -EINVAL;
19427+ goto out_unlock;
19428+ }
19429+#endif
19430+
19431 fill_ldt(&ldt, &ldt_info);
19432 if (oldmode)
19433 ldt.avl = 0;
19434diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19435index c1c429d..f02eaf9 100644
19436--- a/arch/x86/kernel/machine_kexec_32.c
19437+++ b/arch/x86/kernel/machine_kexec_32.c
19438@@ -26,7 +26,7 @@
19439 #include <asm/system.h>
19440 #include <asm/cacheflush.h>
19441
19442-static void set_idt(void *newidt, __u16 limit)
19443+static void set_idt(struct desc_struct *newidt, __u16 limit)
19444 {
19445 struct desc_ptr curidt;
19446
19447@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19448 }
19449
19450
19451-static void set_gdt(void *newgdt, __u16 limit)
19452+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19453 {
19454 struct desc_ptr curgdt;
19455
19456@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
19457 }
19458
19459 control_page = page_address(image->control_code_page);
19460- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19461+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19462
19463 relocate_kernel_ptr = control_page;
19464 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19465diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
19466index 1e47679..e73449d 100644
19467--- a/arch/x86/kernel/microcode_amd.c
19468+++ b/arch/x86/kernel/microcode_amd.c
19469@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
19470 uci->mc = NULL;
19471 }
19472
19473-static struct microcode_ops microcode_amd_ops = {
19474+static const struct microcode_ops microcode_amd_ops = {
19475 .request_microcode_user = request_microcode_user,
19476 .request_microcode_fw = request_microcode_fw,
19477 .collect_cpu_info = collect_cpu_info_amd,
19478@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
19479 .microcode_fini_cpu = microcode_fini_cpu_amd,
19480 };
19481
19482-struct microcode_ops * __init init_amd_microcode(void)
19483+const struct microcode_ops * __init init_amd_microcode(void)
19484 {
19485 return &microcode_amd_ops;
19486 }
19487diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
19488index 378e9a8..b5a6ea9 100644
19489--- a/arch/x86/kernel/microcode_core.c
19490+++ b/arch/x86/kernel/microcode_core.c
19491@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
19492
19493 #define MICROCODE_VERSION "2.00"
19494
19495-static struct microcode_ops *microcode_ops;
19496+static const struct microcode_ops *microcode_ops;
19497
19498 /*
19499 * Synchronization.
19500diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19501index 0d334dd..5a709b5 100644
19502--- a/arch/x86/kernel/microcode_intel.c
19503+++ b/arch/x86/kernel/microcode_intel.c
19504@@ -441,15 +441,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
19505 return ret;
19506 }
19507
19508+static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
19509 static int get_ucode_user(void *to, const void *from, size_t n)
19510 {
19511- return copy_from_user(to, from, n);
19512+ return copy_from_user(to, (const void __force_user *)from, n);
19513 }
19514
19515 static enum ucode_state
19516 request_microcode_user(int cpu, const void __user *buf, size_t size)
19517 {
19518- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19519+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19520 }
19521
19522 static void microcode_fini_cpu(int cpu)
19523@@ -460,7 +461,7 @@ static void microcode_fini_cpu(int cpu)
19524 uci->mc = NULL;
19525 }
19526
19527-static struct microcode_ops microcode_intel_ops = {
19528+static const struct microcode_ops microcode_intel_ops = {
19529 .request_microcode_user = request_microcode_user,
19530 .request_microcode_fw = request_microcode_fw,
19531 .collect_cpu_info = collect_cpu_info,
19532@@ -468,7 +469,7 @@ static struct microcode_ops microcode_intel_ops = {
19533 .microcode_fini_cpu = microcode_fini_cpu,
19534 };
19535
19536-struct microcode_ops * __init init_intel_microcode(void)
19537+const struct microcode_ops * __init init_intel_microcode(void)
19538 {
19539 return &microcode_intel_ops;
19540 }
19541diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19542index 89f386f..9028f51 100644
19543--- a/arch/x86/kernel/module.c
19544+++ b/arch/x86/kernel/module.c
19545@@ -34,7 +34,7 @@
19546 #define DEBUGP(fmt...)
19547 #endif
19548
19549-void *module_alloc(unsigned long size)
19550+static void *__module_alloc(unsigned long size, pgprot_t prot)
19551 {
19552 struct vm_struct *area;
19553
19554@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
19555 if (!area)
19556 return NULL;
19557
19558- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
19559- PAGE_KERNEL_EXEC);
19560+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
19561+}
19562+
19563+void *module_alloc(unsigned long size)
19564+{
19565+
19566+#ifdef CONFIG_PAX_KERNEXEC
19567+ return __module_alloc(size, PAGE_KERNEL);
19568+#else
19569+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19570+#endif
19571+
19572 }
19573
19574 /* Free memory returned from module_alloc */
19575@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
19576 vfree(module_region);
19577 }
19578
19579+#ifdef CONFIG_PAX_KERNEXEC
19580+#ifdef CONFIG_X86_32
19581+void *module_alloc_exec(unsigned long size)
19582+{
19583+ struct vm_struct *area;
19584+
19585+ if (size == 0)
19586+ return NULL;
19587+
19588+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19589+ return area ? area->addr : NULL;
19590+}
19591+EXPORT_SYMBOL(module_alloc_exec);
19592+
19593+void module_free_exec(struct module *mod, void *module_region)
19594+{
19595+ vunmap(module_region);
19596+}
19597+EXPORT_SYMBOL(module_free_exec);
19598+#else
19599+void module_free_exec(struct module *mod, void *module_region)
19600+{
19601+ module_free(mod, module_region);
19602+}
19603+EXPORT_SYMBOL(module_free_exec);
19604+
19605+void *module_alloc_exec(unsigned long size)
19606+{
19607+ return __module_alloc(size, PAGE_KERNEL_RX);
19608+}
19609+EXPORT_SYMBOL(module_alloc_exec);
19610+#endif
19611+#endif
19612+
19613 /* We don't need anything special. */
19614 int module_frob_arch_sections(Elf_Ehdr *hdr,
19615 Elf_Shdr *sechdrs,
19616@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19617 unsigned int i;
19618 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19619 Elf32_Sym *sym;
19620- uint32_t *location;
19621+ uint32_t *plocation, location;
19622
19623 DEBUGP("Applying relocate section %u to %u\n", relsec,
19624 sechdrs[relsec].sh_info);
19625 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19626 /* This is where to make the change */
19627- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19628- + rel[i].r_offset;
19629+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19630+ location = (uint32_t)plocation;
19631+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19632+ plocation = ktla_ktva((void *)plocation);
19633 /* This is the symbol it is referring to. Note that all
19634 undefined symbols have been resolved. */
19635 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19636@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19637 switch (ELF32_R_TYPE(rel[i].r_info)) {
19638 case R_386_32:
19639 /* We add the value into the location given */
19640- *location += sym->st_value;
19641+ pax_open_kernel();
19642+ *plocation += sym->st_value;
19643+ pax_close_kernel();
19644 break;
19645 case R_386_PC32:
19646 /* Add the value, subtract its postition */
19647- *location += sym->st_value - (uint32_t)location;
19648+ pax_open_kernel();
19649+ *plocation += sym->st_value - location;
19650+ pax_close_kernel();
19651 break;
19652 default:
19653 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
19654@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19655 case R_X86_64_NONE:
19656 break;
19657 case R_X86_64_64:
19658+ pax_open_kernel();
19659 *(u64 *)loc = val;
19660+ pax_close_kernel();
19661 break;
19662 case R_X86_64_32:
19663+ pax_open_kernel();
19664 *(u32 *)loc = val;
19665+ pax_close_kernel();
19666 if (val != *(u32 *)loc)
19667 goto overflow;
19668 break;
19669 case R_X86_64_32S:
19670+ pax_open_kernel();
19671 *(s32 *)loc = val;
19672+ pax_close_kernel();
19673 if ((s64)val != *(s32 *)loc)
19674 goto overflow;
19675 break;
19676 case R_X86_64_PC32:
19677 val -= (u64)loc;
19678+ pax_open_kernel();
19679 *(u32 *)loc = val;
19680+ pax_close_kernel();
19681+
19682 #if 0
19683 if ((s64)val != *(s32 *)loc)
19684 goto overflow;
19685diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19686index 3a7c5a4..9191528 100644
19687--- a/arch/x86/kernel/paravirt-spinlocks.c
19688+++ b/arch/x86/kernel/paravirt-spinlocks.c
19689@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
19690 __raw_spin_lock(lock);
19691 }
19692
19693-struct pv_lock_ops pv_lock_ops = {
19694+struct pv_lock_ops pv_lock_ops __read_only = {
19695 #ifdef CONFIG_SMP
19696 .spin_is_locked = __ticket_spin_is_locked,
19697 .spin_is_contended = __ticket_spin_is_contended,
19698diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19699index 1b1739d..dea6077 100644
19700--- a/arch/x86/kernel/paravirt.c
19701+++ b/arch/x86/kernel/paravirt.c
19702@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
19703 {
19704 return x;
19705 }
19706+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19707+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19708+#endif
19709
19710 void __init default_banner(void)
19711 {
19712@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
19713 * corresponding structure. */
19714 static void *get_call_destination(u8 type)
19715 {
19716- struct paravirt_patch_template tmpl = {
19717+ const struct paravirt_patch_template tmpl = {
19718 .pv_init_ops = pv_init_ops,
19719 .pv_time_ops = pv_time_ops,
19720 .pv_cpu_ops = pv_cpu_ops,
19721@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
19722 .pv_lock_ops = pv_lock_ops,
19723 #endif
19724 };
19725+
19726+ pax_track_stack();
19727 return *((void **)&tmpl + type);
19728 }
19729
19730@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19731 if (opfunc == NULL)
19732 /* If there's no function, patch it with a ud2a (BUG) */
19733 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19734- else if (opfunc == _paravirt_nop)
19735+ else if (opfunc == (void *)_paravirt_nop)
19736 /* If the operation is a nop, then nop the callsite */
19737 ret = paravirt_patch_nop();
19738
19739 /* identity functions just return their single argument */
19740- else if (opfunc == _paravirt_ident_32)
19741+ else if (opfunc == (void *)_paravirt_ident_32)
19742 ret = paravirt_patch_ident_32(insnbuf, len);
19743- else if (opfunc == _paravirt_ident_64)
19744+ else if (opfunc == (void *)_paravirt_ident_64)
19745 ret = paravirt_patch_ident_64(insnbuf, len);
19746+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19747+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19748+ ret = paravirt_patch_ident_64(insnbuf, len);
19749+#endif
19750
19751 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19752 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19753@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19754 if (insn_len > len || start == NULL)
19755 insn_len = len;
19756 else
19757- memcpy(insnbuf, start, insn_len);
19758+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19759
19760 return insn_len;
19761 }
19762@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
19763 preempt_enable();
19764 }
19765
19766-struct pv_info pv_info = {
19767+struct pv_info pv_info __read_only = {
19768 .name = "bare hardware",
19769 .paravirt_enabled = 0,
19770 .kernel_rpl = 0,
19771 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
19772 };
19773
19774-struct pv_init_ops pv_init_ops = {
19775+struct pv_init_ops pv_init_ops __read_only = {
19776 .patch = native_patch,
19777 };
19778
19779-struct pv_time_ops pv_time_ops = {
19780+struct pv_time_ops pv_time_ops __read_only = {
19781 .sched_clock = native_sched_clock,
19782 };
19783
19784-struct pv_irq_ops pv_irq_ops = {
19785+struct pv_irq_ops pv_irq_ops __read_only = {
19786 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19787 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19788 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19789@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
19790 #endif
19791 };
19792
19793-struct pv_cpu_ops pv_cpu_ops = {
19794+struct pv_cpu_ops pv_cpu_ops __read_only = {
19795 .cpuid = native_cpuid,
19796 .get_debugreg = native_get_debugreg,
19797 .set_debugreg = native_set_debugreg,
19798@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19799 .end_context_switch = paravirt_nop,
19800 };
19801
19802-struct pv_apic_ops pv_apic_ops = {
19803+struct pv_apic_ops pv_apic_ops __read_only = {
19804 #ifdef CONFIG_X86_LOCAL_APIC
19805 .startup_ipi_hook = paravirt_nop,
19806 #endif
19807 };
19808
19809-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19810+#ifdef CONFIG_X86_32
19811+#ifdef CONFIG_X86_PAE
19812+/* 64-bit pagetable entries */
19813+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19814+#else
19815 /* 32-bit pagetable entries */
19816 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19817+#endif
19818 #else
19819 /* 64-bit pagetable entries */
19820 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19821 #endif
19822
19823-struct pv_mmu_ops pv_mmu_ops = {
19824+struct pv_mmu_ops pv_mmu_ops __read_only = {
19825
19826 .read_cr2 = native_read_cr2,
19827 .write_cr2 = native_write_cr2,
19828@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19829 .make_pud = PTE_IDENT,
19830
19831 .set_pgd = native_set_pgd,
19832+ .set_pgd_batched = native_set_pgd_batched,
19833 #endif
19834 #endif /* PAGETABLE_LEVELS >= 3 */
19835
19836@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19837 },
19838
19839 .set_fixmap = native_set_fixmap,
19840+
19841+#ifdef CONFIG_PAX_KERNEXEC
19842+ .pax_open_kernel = native_pax_open_kernel,
19843+ .pax_close_kernel = native_pax_close_kernel,
19844+#endif
19845+
19846 };
19847
19848 EXPORT_SYMBOL_GPL(pv_time_ops);
19849diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19850index 1a2d4b1..6a0dd55 100644
19851--- a/arch/x86/kernel/pci-calgary_64.c
19852+++ b/arch/x86/kernel/pci-calgary_64.c
19853@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
19854 free_pages((unsigned long)vaddr, get_order(size));
19855 }
19856
19857-static struct dma_map_ops calgary_dma_ops = {
19858+static const struct dma_map_ops calgary_dma_ops = {
19859 .alloc_coherent = calgary_alloc_coherent,
19860 .free_coherent = calgary_free_coherent,
19861 .map_sg = calgary_map_sg,
19862diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
19863index 6ac3931..42b4414 100644
19864--- a/arch/x86/kernel/pci-dma.c
19865+++ b/arch/x86/kernel/pci-dma.c
19866@@ -14,7 +14,7 @@
19867
19868 static int forbid_dac __read_mostly;
19869
19870-struct dma_map_ops *dma_ops;
19871+const struct dma_map_ops *dma_ops;
19872 EXPORT_SYMBOL(dma_ops);
19873
19874 static int iommu_sac_force __read_mostly;
19875@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
19876
19877 int dma_supported(struct device *dev, u64 mask)
19878 {
19879- struct dma_map_ops *ops = get_dma_ops(dev);
19880+ const struct dma_map_ops *ops = get_dma_ops(dev);
19881
19882 #ifdef CONFIG_PCI
19883 if (mask > 0xffffffff && forbid_dac > 0) {
19884diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
19885index 1c76691..e3632db 100644
19886--- a/arch/x86/kernel/pci-gart_64.c
19887+++ b/arch/x86/kernel/pci-gart_64.c
19888@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
19889 return -1;
19890 }
19891
19892-static struct dma_map_ops gart_dma_ops = {
19893+static const struct dma_map_ops gart_dma_ops = {
19894 .map_sg = gart_map_sg,
19895 .unmap_sg = gart_unmap_sg,
19896 .map_page = gart_map_page,
19897diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
19898index a3933d4..c898869 100644
19899--- a/arch/x86/kernel/pci-nommu.c
19900+++ b/arch/x86/kernel/pci-nommu.c
19901@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
19902 flush_write_buffers();
19903 }
19904
19905-struct dma_map_ops nommu_dma_ops = {
19906+const struct dma_map_ops nommu_dma_ops = {
19907 .alloc_coherent = dma_generic_alloc_coherent,
19908 .free_coherent = nommu_free_coherent,
19909 .map_sg = nommu_map_sg,
19910diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
19911index aaa6b78..4de1881 100644
19912--- a/arch/x86/kernel/pci-swiotlb.c
19913+++ b/arch/x86/kernel/pci-swiotlb.c
19914@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
19915 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
19916 }
19917
19918-static struct dma_map_ops swiotlb_dma_ops = {
19919+static const struct dma_map_ops swiotlb_dma_ops = {
19920 .mapping_error = swiotlb_dma_mapping_error,
19921 .alloc_coherent = x86_swiotlb_alloc_coherent,
19922 .free_coherent = swiotlb_free_coherent,
19923diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19924index fc6c84d..0312ca2 100644
19925--- a/arch/x86/kernel/process.c
19926+++ b/arch/x86/kernel/process.c
19927@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
19928
19929 void free_thread_info(struct thread_info *ti)
19930 {
19931- free_thread_xstate(ti->task);
19932 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
19933 }
19934
19935+static struct kmem_cache *task_struct_cachep;
19936+
19937 void arch_task_cache_init(void)
19938 {
19939- task_xstate_cachep =
19940- kmem_cache_create("task_xstate", xstate_size,
19941+ /* create a slab on which task_structs can be allocated */
19942+ task_struct_cachep =
19943+ kmem_cache_create("task_struct", sizeof(struct task_struct),
19944+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
19945+
19946+ task_xstate_cachep =
19947+ kmem_cache_create("task_xstate", xstate_size,
19948 __alignof__(union thread_xstate),
19949- SLAB_PANIC | SLAB_NOTRACK, NULL);
19950+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19951+}
19952+
19953+struct task_struct *alloc_task_struct(void)
19954+{
19955+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
19956+}
19957+
19958+void free_task_struct(struct task_struct *task)
19959+{
19960+ free_thread_xstate(task);
19961+ kmem_cache_free(task_struct_cachep, task);
19962 }
19963
19964 /*
19965@@ -73,7 +90,7 @@ void exit_thread(void)
19966 unsigned long *bp = t->io_bitmap_ptr;
19967
19968 if (bp) {
19969- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19970+ struct tss_struct *tss = init_tss + get_cpu();
19971
19972 t->io_bitmap_ptr = NULL;
19973 clear_thread_flag(TIF_IO_BITMAP);
19974@@ -93,6 +110,9 @@ void flush_thread(void)
19975
19976 clear_tsk_thread_flag(tsk, TIF_DEBUG);
19977
19978+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19979+ loadsegment(gs, 0);
19980+#endif
19981 tsk->thread.debugreg0 = 0;
19982 tsk->thread.debugreg1 = 0;
19983 tsk->thread.debugreg2 = 0;
19984@@ -307,7 +327,7 @@ void default_idle(void)
19985 EXPORT_SYMBOL(default_idle);
19986 #endif
19987
19988-void stop_this_cpu(void *dummy)
19989+__noreturn void stop_this_cpu(void *dummy)
19990 {
19991 local_irq_disable();
19992 /*
19993@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
19994 }
19995 early_param("idle", idle_setup);
19996
19997-unsigned long arch_align_stack(unsigned long sp)
19998+#ifdef CONFIG_PAX_RANDKSTACK
19999+void pax_randomize_kstack(struct pt_regs *regs)
20000 {
20001- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
20002- sp -= get_random_int() % 8192;
20003- return sp & ~0xf;
20004-}
20005+ struct thread_struct *thread = &current->thread;
20006+ unsigned long time;
20007
20008-unsigned long arch_randomize_brk(struct mm_struct *mm)
20009-{
20010- unsigned long range_end = mm->brk + 0x02000000;
20011- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
20012+ if (!randomize_va_space)
20013+ return;
20014+
20015+ if (v8086_mode(regs))
20016+ return;
20017+
20018+ rdtscl(time);
20019+
20020+ /* P4 seems to return a 0 LSB, ignore it */
20021+#ifdef CONFIG_MPENTIUM4
20022+ time &= 0x3EUL;
20023+ time <<= 2;
20024+#elif defined(CONFIG_X86_64)
20025+ time &= 0xFUL;
20026+ time <<= 4;
20027+#else
20028+ time &= 0x1FUL;
20029+ time <<= 3;
20030+#endif
20031+
20032+ thread->sp0 ^= time;
20033+ load_sp0(init_tss + smp_processor_id(), thread);
20034+
20035+#ifdef CONFIG_X86_64
20036+ percpu_write(kernel_stack, thread->sp0);
20037+#endif
20038 }
20039+#endif
20040
20041diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
20042index c40c432..6e1df72 100644
20043--- a/arch/x86/kernel/process_32.c
20044+++ b/arch/x86/kernel/process_32.c
20045@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
20046 unsigned long thread_saved_pc(struct task_struct *tsk)
20047 {
20048 return ((unsigned long *)tsk->thread.sp)[3];
20049+//XXX return tsk->thread.eip;
20050 }
20051
20052 #ifndef CONFIG_SMP
20053@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
20054 unsigned short ss, gs;
20055 const char *board;
20056
20057- if (user_mode_vm(regs)) {
20058+ if (user_mode(regs)) {
20059 sp = regs->sp;
20060 ss = regs->ss & 0xffff;
20061- gs = get_user_gs(regs);
20062 } else {
20063 sp = (unsigned long) (&regs->sp);
20064 savesegment(ss, ss);
20065- savesegment(gs, gs);
20066 }
20067+ gs = get_user_gs(regs);
20068
20069 printk("\n");
20070
20071@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
20072 regs.bx = (unsigned long) fn;
20073 regs.dx = (unsigned long) arg;
20074
20075- regs.ds = __USER_DS;
20076- regs.es = __USER_DS;
20077+ regs.ds = __KERNEL_DS;
20078+ regs.es = __KERNEL_DS;
20079 regs.fs = __KERNEL_PERCPU;
20080- regs.gs = __KERNEL_STACK_CANARY;
20081+ savesegment(gs, regs.gs);
20082 regs.orig_ax = -1;
20083 regs.ip = (unsigned long) kernel_thread_helper;
20084 regs.cs = __KERNEL_CS | get_kernel_rpl();
20085@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20086 struct task_struct *tsk;
20087 int err;
20088
20089- childregs = task_pt_regs(p);
20090+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
20091 *childregs = *regs;
20092 childregs->ax = 0;
20093 childregs->sp = sp;
20094
20095 p->thread.sp = (unsigned long) childregs;
20096 p->thread.sp0 = (unsigned long) (childregs+1);
20097+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20098
20099 p->thread.ip = (unsigned long) ret_from_fork;
20100
20101@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20102 struct thread_struct *prev = &prev_p->thread,
20103 *next = &next_p->thread;
20104 int cpu = smp_processor_id();
20105- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20106+ struct tss_struct *tss = init_tss + cpu;
20107 bool preload_fpu;
20108
20109 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
20110@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20111 */
20112 lazy_save_gs(prev->gs);
20113
20114+#ifdef CONFIG_PAX_MEMORY_UDEREF
20115+ __set_fs(task_thread_info(next_p)->addr_limit);
20116+#endif
20117+
20118 /*
20119 * Load the per-thread Thread-Local Storage descriptor.
20120 */
20121@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20122 */
20123 arch_end_context_switch(next_p);
20124
20125+ percpu_write(current_task, next_p);
20126+ percpu_write(current_tinfo, &next_p->tinfo);
20127+
20128 if (preload_fpu)
20129 __math_state_restore();
20130
20131@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20132 if (prev->gs | next->gs)
20133 lazy_load_gs(next->gs);
20134
20135- percpu_write(current_task, next_p);
20136-
20137 return prev_p;
20138 }
20139
20140@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
20141 } while (count++ < 16);
20142 return 0;
20143 }
20144-
20145diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
20146index 39493bc..196816d 100644
20147--- a/arch/x86/kernel/process_64.c
20148+++ b/arch/x86/kernel/process_64.c
20149@@ -91,7 +91,7 @@ static void __exit_idle(void)
20150 void exit_idle(void)
20151 {
20152 /* idle loop has pid 0 */
20153- if (current->pid)
20154+ if (task_pid_nr(current))
20155 return;
20156 __exit_idle();
20157 }
20158@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
20159 if (!board)
20160 board = "";
20161 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
20162- current->pid, current->comm, print_tainted(),
20163+ task_pid_nr(current), current->comm, print_tainted(),
20164 init_utsname()->release,
20165 (int)strcspn(init_utsname()->version, " "),
20166 init_utsname()->version, board);
20167@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20168 struct pt_regs *childregs;
20169 struct task_struct *me = current;
20170
20171- childregs = ((struct pt_regs *)
20172- (THREAD_SIZE + task_stack_page(p))) - 1;
20173+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
20174 *childregs = *regs;
20175
20176 childregs->ax = 0;
20177@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20178 p->thread.sp = (unsigned long) childregs;
20179 p->thread.sp0 = (unsigned long) (childregs+1);
20180 p->thread.usersp = me->thread.usersp;
20181+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20182
20183 set_tsk_thread_flag(p, TIF_FORK);
20184
20185@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20186 struct thread_struct *prev = &prev_p->thread;
20187 struct thread_struct *next = &next_p->thread;
20188 int cpu = smp_processor_id();
20189- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20190+ struct tss_struct *tss = init_tss + cpu;
20191 unsigned fsindex, gsindex;
20192 bool preload_fpu;
20193
20194@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20195 prev->usersp = percpu_read(old_rsp);
20196 percpu_write(old_rsp, next->usersp);
20197 percpu_write(current_task, next_p);
20198+ percpu_write(current_tinfo, &next_p->tinfo);
20199
20200- percpu_write(kernel_stack,
20201- (unsigned long)task_stack_page(next_p) +
20202- THREAD_SIZE - KERNEL_STACK_OFFSET);
20203+ percpu_write(kernel_stack, next->sp0);
20204
20205 /*
20206 * Now maybe reload the debug registers and handle I/O bitmaps
20207@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
20208 if (!p || p == current || p->state == TASK_RUNNING)
20209 return 0;
20210 stack = (unsigned long)task_stack_page(p);
20211- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20212+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20213 return 0;
20214 fp = *(u64 *)(p->thread.sp);
20215 do {
20216- if (fp < (unsigned long)stack ||
20217- fp >= (unsigned long)stack+THREAD_SIZE)
20218+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20219 return 0;
20220 ip = *(u64 *)(fp+8);
20221 if (!in_sched_functions(ip))
20222diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20223index c06acdd..09de221 100644
20224--- a/arch/x86/kernel/ptrace.c
20225+++ b/arch/x86/kernel/ptrace.c
20226@@ -559,6 +559,10 @@ static int ioperm_active(struct task_struct *target,
20227 static int ioperm_get(struct task_struct *target,
20228 const struct user_regset *regset,
20229 unsigned int pos, unsigned int count,
20230+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
20231+static int ioperm_get(struct task_struct *target,
20232+ const struct user_regset *regset,
20233+ unsigned int pos, unsigned int count,
20234 void *kbuf, void __user *ubuf)
20235 {
20236 if (!target->thread.io_bitmap_ptr)
20237@@ -925,7 +929,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
20238 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20239 {
20240 int ret;
20241- unsigned long __user *datap = (unsigned long __user *)data;
20242+ unsigned long __user *datap = (__force unsigned long __user *)data;
20243
20244 switch (request) {
20245 /* read the word at location addr in the USER area. */
20246@@ -1012,14 +1016,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20247 if (addr < 0)
20248 return -EIO;
20249 ret = do_get_thread_area(child, addr,
20250- (struct user_desc __user *) data);
20251+ (__force struct user_desc __user *) data);
20252 break;
20253
20254 case PTRACE_SET_THREAD_AREA:
20255 if (addr < 0)
20256 return -EIO;
20257 ret = do_set_thread_area(child, addr,
20258- (struct user_desc __user *) data, 0);
20259+ (__force struct user_desc __user *) data, 0);
20260 break;
20261 #endif
20262
20263@@ -1038,12 +1042,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20264 #ifdef CONFIG_X86_PTRACE_BTS
20265 case PTRACE_BTS_CONFIG:
20266 ret = ptrace_bts_config
20267- (child, data, (struct ptrace_bts_config __user *)addr);
20268+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20269 break;
20270
20271 case PTRACE_BTS_STATUS:
20272 ret = ptrace_bts_status
20273- (child, data, (struct ptrace_bts_config __user *)addr);
20274+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20275 break;
20276
20277 case PTRACE_BTS_SIZE:
20278@@ -1052,7 +1056,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20279
20280 case PTRACE_BTS_GET:
20281 ret = ptrace_bts_read_record
20282- (child, data, (struct bts_struct __user *) addr);
20283+ (child, data, (__force struct bts_struct __user *) addr);
20284 break;
20285
20286 case PTRACE_BTS_CLEAR:
20287@@ -1061,7 +1065,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20288
20289 case PTRACE_BTS_DRAIN:
20290 ret = ptrace_bts_drain
20291- (child, data, (struct bts_struct __user *) addr);
20292+ (child, data, (__force struct bts_struct __user *) addr);
20293 break;
20294 #endif /* CONFIG_X86_PTRACE_BTS */
20295
20296@@ -1450,7 +1454,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20297 info.si_code = si_code;
20298
20299 /* User-mode ip? */
20300- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
20301+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
20302
20303 /* Send us the fake SIGTRAP */
20304 force_sig_info(SIGTRAP, &info, tsk);
20305@@ -1469,7 +1473,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20306 * We must return the syscall number to actually look up in the table.
20307 * This can be -1L to skip running any syscall at all.
20308 */
20309-asmregparm long syscall_trace_enter(struct pt_regs *regs)
20310+long syscall_trace_enter(struct pt_regs *regs)
20311 {
20312 long ret = 0;
20313
20314@@ -1514,7 +1518,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
20315 return ret ?: regs->orig_ax;
20316 }
20317
20318-asmregparm void syscall_trace_leave(struct pt_regs *regs)
20319+void syscall_trace_leave(struct pt_regs *regs)
20320 {
20321 if (unlikely(current->audit_context))
20322 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
20323diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20324index cf98100..e76e03d 100644
20325--- a/arch/x86/kernel/reboot.c
20326+++ b/arch/x86/kernel/reboot.c
20327@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
20328 EXPORT_SYMBOL(pm_power_off);
20329
20330 static const struct desc_ptr no_idt = {};
20331-static int reboot_mode;
20332+static unsigned short reboot_mode;
20333 enum reboot_type reboot_type = BOOT_KBD;
20334 int reboot_force;
20335
20336@@ -292,12 +292,12 @@ core_initcall(reboot_init);
20337 controller to pulse the CPU reset line, which is more thorough, but
20338 doesn't work with at least one type of 486 motherboard. It is easy
20339 to stop this code working; hence the copious comments. */
20340-static const unsigned long long
20341-real_mode_gdt_entries [3] =
20342+static struct desc_struct
20343+real_mode_gdt_entries [3] __read_only =
20344 {
20345- 0x0000000000000000ULL, /* Null descriptor */
20346- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
20347- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
20348+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
20349+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
20350+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
20351 };
20352
20353 static const struct desc_ptr
20354@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
20355 * specified by the code and length parameters.
20356 * We assume that length will aways be less that 100!
20357 */
20358-void machine_real_restart(const unsigned char *code, int length)
20359+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
20360 {
20361 local_irq_disable();
20362
20363@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
20364 /* Remap the kernel at virtual address zero, as well as offset zero
20365 from the kernel segment. This assumes the kernel segment starts at
20366 virtual address PAGE_OFFSET. */
20367- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20368- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
20369+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20370+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20371
20372 /*
20373 * Use `swapper_pg_dir' as our page directory.
20374@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
20375 boot)". This seems like a fairly standard thing that gets set by
20376 REBOOT.COM programs, and the previous reset routine did this
20377 too. */
20378- *((unsigned short *)0x472) = reboot_mode;
20379+ *(unsigned short *)(__va(0x472)) = reboot_mode;
20380
20381 /* For the switch to real mode, copy some code to low memory. It has
20382 to be in the first 64k because it is running in 16-bit mode, and it
20383 has to have the same physical and virtual address, because it turns
20384 off paging. Copy it near the end of the first page, out of the way
20385 of BIOS variables. */
20386- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
20387- real_mode_switch, sizeof (real_mode_switch));
20388- memcpy((void *)(0x1000 - 100), code, length);
20389+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
20390+ memcpy(__va(0x1000 - 100), code, length);
20391
20392 /* Set up the IDT for real mode. */
20393 load_idt(&real_mode_idt);
20394@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
20395 __asm__ __volatile__ ("ljmp $0x0008,%0"
20396 :
20397 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
20398+ do { } while (1);
20399 }
20400 #ifdef CONFIG_APM_MODULE
20401 EXPORT_SYMBOL(machine_real_restart);
20402@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20403 {
20404 }
20405
20406-static void native_machine_emergency_restart(void)
20407+__noreturn static void native_machine_emergency_restart(void)
20408 {
20409 int i;
20410
20411@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
20412 #endif
20413 }
20414
20415-static void __machine_emergency_restart(int emergency)
20416+static __noreturn void __machine_emergency_restart(int emergency)
20417 {
20418 reboot_emergency = emergency;
20419 machine_ops.emergency_restart();
20420 }
20421
20422-static void native_machine_restart(char *__unused)
20423+static __noreturn void native_machine_restart(char *__unused)
20424 {
20425 printk("machine restart\n");
20426
20427@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
20428 __machine_emergency_restart(0);
20429 }
20430
20431-static void native_machine_halt(void)
20432+static __noreturn void native_machine_halt(void)
20433 {
20434 /* stop other cpus and apics */
20435 machine_shutdown();
20436@@ -685,7 +685,7 @@ static void native_machine_halt(void)
20437 stop_this_cpu(NULL);
20438 }
20439
20440-static void native_machine_power_off(void)
20441+__noreturn static void native_machine_power_off(void)
20442 {
20443 if (pm_power_off) {
20444 if (!reboot_force)
20445@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
20446 }
20447 /* a fallback in case there is no PM info available */
20448 tboot_shutdown(TB_SHUTDOWN_HALT);
20449+ do { } while (1);
20450 }
20451
20452 struct machine_ops machine_ops = {
20453diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20454index 7a6f3b3..976a959 100644
20455--- a/arch/x86/kernel/relocate_kernel_64.S
20456+++ b/arch/x86/kernel/relocate_kernel_64.S
20457@@ -11,6 +11,7 @@
20458 #include <asm/kexec.h>
20459 #include <asm/processor-flags.h>
20460 #include <asm/pgtable_types.h>
20461+#include <asm/alternative-asm.h>
20462
20463 /*
20464 * Must be relocatable PIC code callable as a C function
20465@@ -167,6 +168,7 @@ identity_mapped:
20466 xorq %r14, %r14
20467 xorq %r15, %r15
20468
20469+ pax_force_retaddr 0, 1
20470 ret
20471
20472 1:
20473diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20474index 5449a26..0b6c759 100644
20475--- a/arch/x86/kernel/setup.c
20476+++ b/arch/x86/kernel/setup.c
20477@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
20478
20479 if (!boot_params.hdr.root_flags)
20480 root_mountflags &= ~MS_RDONLY;
20481- init_mm.start_code = (unsigned long) _text;
20482- init_mm.end_code = (unsigned long) _etext;
20483+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20484+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20485 init_mm.end_data = (unsigned long) _edata;
20486 init_mm.brk = _brk_end;
20487
20488- code_resource.start = virt_to_phys(_text);
20489- code_resource.end = virt_to_phys(_etext)-1;
20490- data_resource.start = virt_to_phys(_etext);
20491+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20492+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20493+ data_resource.start = virt_to_phys(_sdata);
20494 data_resource.end = virt_to_phys(_edata)-1;
20495 bss_resource.start = virt_to_phys(&__bss_start);
20496 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20497diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20498index d559af9..244f55d 100644
20499--- a/arch/x86/kernel/setup_percpu.c
20500+++ b/arch/x86/kernel/setup_percpu.c
20501@@ -25,19 +25,17 @@
20502 # define DBG(x...)
20503 #endif
20504
20505-DEFINE_PER_CPU(int, cpu_number);
20506+#ifdef CONFIG_SMP
20507+DEFINE_PER_CPU(unsigned int, cpu_number);
20508 EXPORT_PER_CPU_SYMBOL(cpu_number);
20509+#endif
20510
20511-#ifdef CONFIG_X86_64
20512 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20513-#else
20514-#define BOOT_PERCPU_OFFSET 0
20515-#endif
20516
20517 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20518 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20519
20520-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20521+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20522 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20523 };
20524 EXPORT_SYMBOL(__per_cpu_offset);
20525@@ -100,6 +98,8 @@ static bool __init pcpu_need_numa(void)
20526 * Pointer to the allocated area on success, NULL on failure.
20527 */
20528 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20529+ unsigned long align) __size_overflow(2);
20530+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20531 unsigned long align)
20532 {
20533 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
20534@@ -128,6 +128,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20535 /*
20536 * Helpers for first chunk memory allocation
20537 */
20538+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
20539+
20540 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
20541 {
20542 return pcpu_alloc_bootmem(cpu, size, align);
20543@@ -159,10 +161,10 @@ static inline void setup_percpu_segment(int cpu)
20544 {
20545 #ifdef CONFIG_X86_32
20546 struct desc_struct gdt;
20547+ unsigned long base = per_cpu_offset(cpu);
20548
20549- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20550- 0x2 | DESCTYPE_S, 0x8);
20551- gdt.s = 1;
20552+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20553+ 0x83 | DESCTYPE_S, 0xC);
20554 write_gdt_entry(get_cpu_gdt_table(cpu),
20555 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20556 #endif
20557@@ -212,6 +214,11 @@ void __init setup_per_cpu_areas(void)
20558 /* alrighty, percpu areas up and running */
20559 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20560 for_each_possible_cpu(cpu) {
20561+#ifdef CONFIG_CC_STACKPROTECTOR
20562+#ifdef CONFIG_X86_32
20563+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20564+#endif
20565+#endif
20566 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20567 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20568 per_cpu(cpu_number, cpu) = cpu;
20569@@ -239,6 +246,12 @@ void __init setup_per_cpu_areas(void)
20570 early_per_cpu_map(x86_cpu_to_node_map, cpu);
20571 #endif
20572 #endif
20573+#ifdef CONFIG_CC_STACKPROTECTOR
20574+#ifdef CONFIG_X86_32
20575+ if (!cpu)
20576+ per_cpu(stack_canary.canary, cpu) = canary;
20577+#endif
20578+#endif
20579 /*
20580 * Up to this point, the boot CPU has been using .data.init
20581 * area. Reload any changed state for the boot CPU.
20582diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20583index 6a44a76..a9287a1 100644
20584--- a/arch/x86/kernel/signal.c
20585+++ b/arch/x86/kernel/signal.c
20586@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
20587 * Align the stack pointer according to the i386 ABI,
20588 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20589 */
20590- sp = ((sp + 4) & -16ul) - 4;
20591+ sp = ((sp - 12) & -16ul) - 4;
20592 #else /* !CONFIG_X86_32 */
20593 sp = round_down(sp, 16) - 8;
20594 #endif
20595@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
20596 * Return an always-bogus address instead so we will die with SIGSEGV.
20597 */
20598 if (onsigstack && !likely(on_sig_stack(sp)))
20599- return (void __user *)-1L;
20600+ return (__force void __user *)-1L;
20601
20602 /* save i387 state */
20603 if (used_math() && save_i387_xstate(*fpstate) < 0)
20604- return (void __user *)-1L;
20605+ return (__force void __user *)-1L;
20606
20607 return (void __user *)sp;
20608 }
20609@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20610 }
20611
20612 if (current->mm->context.vdso)
20613- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20614+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20615 else
20616- restorer = &frame->retcode;
20617+ restorer = (void __user *)&frame->retcode;
20618 if (ka->sa.sa_flags & SA_RESTORER)
20619 restorer = ka->sa.sa_restorer;
20620
20621@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20622 * reasons and because gdb uses it as a signature to notice
20623 * signal handler stack frames.
20624 */
20625- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20626+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20627
20628 if (err)
20629 return -EFAULT;
20630@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20631 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
20632
20633 /* Set up to return from userspace. */
20634- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20635+ if (current->mm->context.vdso)
20636+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20637+ else
20638+ restorer = (void __user *)&frame->retcode;
20639 if (ka->sa.sa_flags & SA_RESTORER)
20640 restorer = ka->sa.sa_restorer;
20641 put_user_ex(restorer, &frame->pretcode);
20642@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20643 * reasons and because gdb uses it as a signature to notice
20644 * signal handler stack frames.
20645 */
20646- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20647+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20648 } put_user_catch(err);
20649
20650 if (err)
20651@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
20652 int signr;
20653 sigset_t *oldset;
20654
20655+ pax_track_stack();
20656+
20657 /*
20658 * We want the common case to go fast, which is why we may in certain
20659 * cases get here from kernel mode. Just return without doing anything
20660@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
20661 * X86_32: vm86 regs switched out by assembly code before reaching
20662 * here, so testing against kernel CS suffices.
20663 */
20664- if (!user_mode(regs))
20665+ if (!user_mode_novm(regs))
20666 return;
20667
20668 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
20669diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20670index 7e8e905..64d5c32 100644
20671--- a/arch/x86/kernel/smpboot.c
20672+++ b/arch/x86/kernel/smpboot.c
20673@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
20674 */
20675 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
20676
20677-void cpu_hotplug_driver_lock()
20678+void cpu_hotplug_driver_lock(void)
20679 {
20680- mutex_lock(&x86_cpu_hotplug_driver_mutex);
20681+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
20682 }
20683
20684-void cpu_hotplug_driver_unlock()
20685+void cpu_hotplug_driver_unlock(void)
20686 {
20687- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20688+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20689 }
20690
20691 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
20692@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
20693 * target processor state.
20694 */
20695 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
20696- (unsigned long)stack_start.sp);
20697+ stack_start);
20698
20699 /*
20700 * Run STARTUP IPI loop.
20701@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
20702 set_idle_for_cpu(cpu, c_idle.idle);
20703 do_rest:
20704 per_cpu(current_task, cpu) = c_idle.idle;
20705+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
20706 #ifdef CONFIG_X86_32
20707 /* Stack for startup_32 can be just as for start_secondary onwards */
20708 irq_ctx_init(cpu);
20709@@ -750,13 +751,15 @@ do_rest:
20710 #else
20711 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
20712 initial_gs = per_cpu_offset(cpu);
20713- per_cpu(kernel_stack, cpu) =
20714- (unsigned long)task_stack_page(c_idle.idle) -
20715- KERNEL_STACK_OFFSET + THREAD_SIZE;
20716+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
20717 #endif
20718+
20719+ pax_open_kernel();
20720 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20721+ pax_close_kernel();
20722+
20723 initial_code = (unsigned long)start_secondary;
20724- stack_start.sp = (void *) c_idle.idle->thread.sp;
20725+ stack_start = c_idle.idle->thread.sp;
20726
20727 /* start_ip had better be page-aligned! */
20728 start_ip = setup_trampoline();
20729@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
20730
20731 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
20732
20733+#ifdef CONFIG_PAX_PER_CPU_PGD
20734+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20735+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20736+ KERNEL_PGD_PTRS);
20737+#endif
20738+
20739 err = do_boot_cpu(apicid, cpu);
20740
20741 if (err) {
20742diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20743index 3149032..14f1053 100644
20744--- a/arch/x86/kernel/step.c
20745+++ b/arch/x86/kernel/step.c
20746@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20747 struct desc_struct *desc;
20748 unsigned long base;
20749
20750- seg &= ~7UL;
20751+ seg >>= 3;
20752
20753 mutex_lock(&child->mm->context.lock);
20754- if (unlikely((seg >> 3) >= child->mm->context.size))
20755+ if (unlikely(seg >= child->mm->context.size))
20756 addr = -1L; /* bogus selector, access would fault */
20757 else {
20758 desc = child->mm->context.ldt + seg;
20759@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20760 addr += base;
20761 }
20762 mutex_unlock(&child->mm->context.lock);
20763- }
20764+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20765+ addr = ktla_ktva(addr);
20766
20767 return addr;
20768 }
20769@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20770 unsigned char opcode[15];
20771 unsigned long addr = convert_ip_to_linear(child, regs);
20772
20773+ if (addr == -EINVAL)
20774+ return 0;
20775+
20776 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20777 for (i = 0; i < copied; i++) {
20778 switch (opcode[i]) {
20779@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20780
20781 #ifdef CONFIG_X86_64
20782 case 0x40 ... 0x4f:
20783- if (regs->cs != __USER_CS)
20784+ if ((regs->cs & 0xffff) != __USER_CS)
20785 /* 32-bit mode: register increment */
20786 return 0;
20787 /* 64-bit mode: REX prefix */
20788diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20789index dee1ff7..a397f7f 100644
20790--- a/arch/x86/kernel/sys_i386_32.c
20791+++ b/arch/x86/kernel/sys_i386_32.c
20792@@ -24,6 +24,21 @@
20793
20794 #include <asm/syscalls.h>
20795
20796+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20797+{
20798+ unsigned long pax_task_size = TASK_SIZE;
20799+
20800+#ifdef CONFIG_PAX_SEGMEXEC
20801+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20802+ pax_task_size = SEGMEXEC_TASK_SIZE;
20803+#endif
20804+
20805+ if (len > pax_task_size || addr > pax_task_size - len)
20806+ return -EINVAL;
20807+
20808+ return 0;
20809+}
20810+
20811 /*
20812 * Perform the select(nd, in, out, ex, tv) and mmap() system
20813 * calls. Linux/i386 didn't use to be able to handle more than
20814@@ -58,6 +73,212 @@ out:
20815 return err;
20816 }
20817
20818+unsigned long
20819+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20820+ unsigned long len, unsigned long pgoff, unsigned long flags)
20821+{
20822+ struct mm_struct *mm = current->mm;
20823+ struct vm_area_struct *vma;
20824+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20825+
20826+#ifdef CONFIG_PAX_SEGMEXEC
20827+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20828+ pax_task_size = SEGMEXEC_TASK_SIZE;
20829+#endif
20830+
20831+ pax_task_size -= PAGE_SIZE;
20832+
20833+ if (len > pax_task_size)
20834+ return -ENOMEM;
20835+
20836+ if (flags & MAP_FIXED)
20837+ return addr;
20838+
20839+#ifdef CONFIG_PAX_RANDMMAP
20840+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20841+#endif
20842+
20843+ if (addr) {
20844+ addr = PAGE_ALIGN(addr);
20845+ if (pax_task_size - len >= addr) {
20846+ vma = find_vma(mm, addr);
20847+ if (check_heap_stack_gap(vma, addr, len))
20848+ return addr;
20849+ }
20850+ }
20851+ if (len > mm->cached_hole_size) {
20852+ start_addr = addr = mm->free_area_cache;
20853+ } else {
20854+ start_addr = addr = mm->mmap_base;
20855+ mm->cached_hole_size = 0;
20856+ }
20857+
20858+#ifdef CONFIG_PAX_PAGEEXEC
20859+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20860+ start_addr = 0x00110000UL;
20861+
20862+#ifdef CONFIG_PAX_RANDMMAP
20863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20864+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20865+#endif
20866+
20867+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20868+ start_addr = addr = mm->mmap_base;
20869+ else
20870+ addr = start_addr;
20871+ }
20872+#endif
20873+
20874+full_search:
20875+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20876+ /* At this point: (!vma || addr < vma->vm_end). */
20877+ if (pax_task_size - len < addr) {
20878+ /*
20879+ * Start a new search - just in case we missed
20880+ * some holes.
20881+ */
20882+ if (start_addr != mm->mmap_base) {
20883+ start_addr = addr = mm->mmap_base;
20884+ mm->cached_hole_size = 0;
20885+ goto full_search;
20886+ }
20887+ return -ENOMEM;
20888+ }
20889+ if (check_heap_stack_gap(vma, addr, len))
20890+ break;
20891+ if (addr + mm->cached_hole_size < vma->vm_start)
20892+ mm->cached_hole_size = vma->vm_start - addr;
20893+ addr = vma->vm_end;
20894+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20895+ start_addr = addr = mm->mmap_base;
20896+ mm->cached_hole_size = 0;
20897+ goto full_search;
20898+ }
20899+ }
20900+
20901+ /*
20902+ * Remember the place where we stopped the search:
20903+ */
20904+ mm->free_area_cache = addr + len;
20905+ return addr;
20906+}
20907+
20908+unsigned long
20909+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20910+ const unsigned long len, const unsigned long pgoff,
20911+ const unsigned long flags)
20912+{
20913+ struct vm_area_struct *vma;
20914+ struct mm_struct *mm = current->mm;
20915+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20916+
20917+#ifdef CONFIG_PAX_SEGMEXEC
20918+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20919+ pax_task_size = SEGMEXEC_TASK_SIZE;
20920+#endif
20921+
20922+ pax_task_size -= PAGE_SIZE;
20923+
20924+ /* requested length too big for entire address space */
20925+ if (len > pax_task_size)
20926+ return -ENOMEM;
20927+
20928+ if (flags & MAP_FIXED)
20929+ return addr;
20930+
20931+#ifdef CONFIG_PAX_PAGEEXEC
20932+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20933+ goto bottomup;
20934+#endif
20935+
20936+#ifdef CONFIG_PAX_RANDMMAP
20937+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20938+#endif
20939+
20940+ /* requesting a specific address */
20941+ if (addr) {
20942+ addr = PAGE_ALIGN(addr);
20943+ if (pax_task_size - len >= addr) {
20944+ vma = find_vma(mm, addr);
20945+ if (check_heap_stack_gap(vma, addr, len))
20946+ return addr;
20947+ }
20948+ }
20949+
20950+ /* check if free_area_cache is useful for us */
20951+ if (len <= mm->cached_hole_size) {
20952+ mm->cached_hole_size = 0;
20953+ mm->free_area_cache = mm->mmap_base;
20954+ }
20955+
20956+ /* either no address requested or can't fit in requested address hole */
20957+ addr = mm->free_area_cache;
20958+
20959+ /* make sure it can fit in the remaining address space */
20960+ if (addr > len) {
20961+ vma = find_vma(mm, addr-len);
20962+ if (check_heap_stack_gap(vma, addr - len, len))
20963+ /* remember the address as a hint for next time */
20964+ return (mm->free_area_cache = addr-len);
20965+ }
20966+
20967+ if (mm->mmap_base < len)
20968+ goto bottomup;
20969+
20970+ addr = mm->mmap_base-len;
20971+
20972+ do {
20973+ /*
20974+ * Lookup failure means no vma is above this address,
20975+ * else if new region fits below vma->vm_start,
20976+ * return with success:
20977+ */
20978+ vma = find_vma(mm, addr);
20979+ if (check_heap_stack_gap(vma, addr, len))
20980+ /* remember the address as a hint for next time */
20981+ return (mm->free_area_cache = addr);
20982+
20983+ /* remember the largest hole we saw so far */
20984+ if (addr + mm->cached_hole_size < vma->vm_start)
20985+ mm->cached_hole_size = vma->vm_start - addr;
20986+
20987+ /* try just below the current vma->vm_start */
20988+ addr = skip_heap_stack_gap(vma, len);
20989+ } while (!IS_ERR_VALUE(addr));
20990+
20991+bottomup:
20992+ /*
20993+ * A failed mmap() very likely causes application failure,
20994+ * so fall back to the bottom-up function here. This scenario
20995+ * can happen with large stack limits and large mmap()
20996+ * allocations.
20997+ */
20998+
20999+#ifdef CONFIG_PAX_SEGMEXEC
21000+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21001+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21002+ else
21003+#endif
21004+
21005+ mm->mmap_base = TASK_UNMAPPED_BASE;
21006+
21007+#ifdef CONFIG_PAX_RANDMMAP
21008+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21009+ mm->mmap_base += mm->delta_mmap;
21010+#endif
21011+
21012+ mm->free_area_cache = mm->mmap_base;
21013+ mm->cached_hole_size = ~0UL;
21014+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21015+ /*
21016+ * Restore the topdown base:
21017+ */
21018+ mm->mmap_base = base;
21019+ mm->free_area_cache = base;
21020+ mm->cached_hole_size = ~0UL;
21021+
21022+ return addr;
21023+}
21024
21025 struct sel_arg_struct {
21026 unsigned long n;
21027@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21028 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
21029 case SEMTIMEDOP:
21030 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
21031- (const struct timespec __user *)fifth);
21032+ (__force const struct timespec __user *)fifth);
21033
21034 case SEMGET:
21035 return sys_semget(first, second, third);
21036@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21037 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
21038 if (ret)
21039 return ret;
21040- return put_user(raddr, (ulong __user *) third);
21041+ return put_user(raddr, (__force ulong __user *) third);
21042 }
21043 case 1: /* iBCS2 emulator entry point */
21044 if (!segment_eq(get_fs(), get_ds()))
21045@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
21046
21047 return error;
21048 }
21049-
21050-
21051-/*
21052- * Do a system call from kernel instead of calling sys_execve so we
21053- * end up with proper pt_regs.
21054- */
21055-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
21056-{
21057- long __res;
21058- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
21059- : "=a" (__res)
21060- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
21061- return __res;
21062-}
21063diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
21064index 8aa2057..b604bc1 100644
21065--- a/arch/x86/kernel/sys_x86_64.c
21066+++ b/arch/x86/kernel/sys_x86_64.c
21067@@ -32,8 +32,8 @@ out:
21068 return error;
21069 }
21070
21071-static void find_start_end(unsigned long flags, unsigned long *begin,
21072- unsigned long *end)
21073+static void find_start_end(struct mm_struct *mm, unsigned long flags,
21074+ unsigned long *begin, unsigned long *end)
21075 {
21076 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
21077 unsigned long new_begin;
21078@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
21079 *begin = new_begin;
21080 }
21081 } else {
21082- *begin = TASK_UNMAPPED_BASE;
21083+ *begin = mm->mmap_base;
21084 *end = TASK_SIZE;
21085 }
21086 }
21087@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
21088 if (flags & MAP_FIXED)
21089 return addr;
21090
21091- find_start_end(flags, &begin, &end);
21092+ find_start_end(mm, flags, &begin, &end);
21093
21094 if (len > end)
21095 return -ENOMEM;
21096
21097+#ifdef CONFIG_PAX_RANDMMAP
21098+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21099+#endif
21100+
21101 if (addr) {
21102 addr = PAGE_ALIGN(addr);
21103 vma = find_vma(mm, addr);
21104- if (end - len >= addr &&
21105- (!vma || addr + len <= vma->vm_start))
21106+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
21107 return addr;
21108 }
21109 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
21110@@ -106,7 +109,7 @@ full_search:
21111 }
21112 return -ENOMEM;
21113 }
21114- if (!vma || addr + len <= vma->vm_start) {
21115+ if (check_heap_stack_gap(vma, addr, len)) {
21116 /*
21117 * Remember the place where we stopped the search:
21118 */
21119@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21120 {
21121 struct vm_area_struct *vma;
21122 struct mm_struct *mm = current->mm;
21123- unsigned long addr = addr0;
21124+ unsigned long base = mm->mmap_base, addr = addr0;
21125
21126 /* requested length too big for entire address space */
21127 if (len > TASK_SIZE)
21128@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21129 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
21130 goto bottomup;
21131
21132+#ifdef CONFIG_PAX_RANDMMAP
21133+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21134+#endif
21135+
21136 /* requesting a specific address */
21137 if (addr) {
21138 addr = PAGE_ALIGN(addr);
21139- vma = find_vma(mm, addr);
21140- if (TASK_SIZE - len >= addr &&
21141- (!vma || addr + len <= vma->vm_start))
21142- return addr;
21143+ if (TASK_SIZE - len >= addr) {
21144+ vma = find_vma(mm, addr);
21145+ if (check_heap_stack_gap(vma, addr, len))
21146+ return addr;
21147+ }
21148 }
21149
21150 /* check if free_area_cache is useful for us */
21151@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21152 /* make sure it can fit in the remaining address space */
21153 if (addr > len) {
21154 vma = find_vma(mm, addr-len);
21155- if (!vma || addr <= vma->vm_start)
21156+ if (check_heap_stack_gap(vma, addr - len, len))
21157 /* remember the address as a hint for next time */
21158 return mm->free_area_cache = addr-len;
21159 }
21160@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21161 * return with success:
21162 */
21163 vma = find_vma(mm, addr);
21164- if (!vma || addr+len <= vma->vm_start)
21165+ if (check_heap_stack_gap(vma, addr, len))
21166 /* remember the address as a hint for next time */
21167 return mm->free_area_cache = addr;
21168
21169@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21170 mm->cached_hole_size = vma->vm_start - addr;
21171
21172 /* try just below the current vma->vm_start */
21173- addr = vma->vm_start-len;
21174- } while (len < vma->vm_start);
21175+ addr = skip_heap_stack_gap(vma, len);
21176+ } while (!IS_ERR_VALUE(addr));
21177
21178 bottomup:
21179 /*
21180@@ -198,13 +206,21 @@ bottomup:
21181 * can happen with large stack limits and large mmap()
21182 * allocations.
21183 */
21184+ mm->mmap_base = TASK_UNMAPPED_BASE;
21185+
21186+#ifdef CONFIG_PAX_RANDMMAP
21187+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21188+ mm->mmap_base += mm->delta_mmap;
21189+#endif
21190+
21191+ mm->free_area_cache = mm->mmap_base;
21192 mm->cached_hole_size = ~0UL;
21193- mm->free_area_cache = TASK_UNMAPPED_BASE;
21194 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21195 /*
21196 * Restore the topdown base:
21197 */
21198- mm->free_area_cache = mm->mmap_base;
21199+ mm->mmap_base = base;
21200+ mm->free_area_cache = base;
21201 mm->cached_hole_size = ~0UL;
21202
21203 return addr;
21204diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
21205index 76d70a4..4c94a44 100644
21206--- a/arch/x86/kernel/syscall_table_32.S
21207+++ b/arch/x86/kernel/syscall_table_32.S
21208@@ -1,3 +1,4 @@
21209+.section .rodata,"a",@progbits
21210 ENTRY(sys_call_table)
21211 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
21212 .long sys_exit
21213diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
21214index 46b8277..3349d55 100644
21215--- a/arch/x86/kernel/tboot.c
21216+++ b/arch/x86/kernel/tboot.c
21217@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
21218
21219 void tboot_shutdown(u32 shutdown_type)
21220 {
21221- void (*shutdown)(void);
21222+ void (* __noreturn shutdown)(void);
21223
21224 if (!tboot_enabled())
21225 return;
21226@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
21227
21228 switch_to_tboot_pt();
21229
21230- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
21231+ shutdown = (void *)tboot->shutdown_entry;
21232 shutdown();
21233
21234 /* should not reach here */
21235@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
21236 tboot_shutdown(acpi_shutdown_map[sleep_state]);
21237 }
21238
21239-static atomic_t ap_wfs_count;
21240+static atomic_unchecked_t ap_wfs_count;
21241
21242 static int tboot_wait_for_aps(int num_aps)
21243 {
21244@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
21245 {
21246 switch (action) {
21247 case CPU_DYING:
21248- atomic_inc(&ap_wfs_count);
21249+ atomic_inc_unchecked(&ap_wfs_count);
21250 if (num_online_cpus() == 1)
21251- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
21252+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
21253 return NOTIFY_BAD;
21254 break;
21255 }
21256@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
21257
21258 tboot_create_trampoline();
21259
21260- atomic_set(&ap_wfs_count, 0);
21261+ atomic_set_unchecked(&ap_wfs_count, 0);
21262 register_hotcpu_notifier(&tboot_cpu_notifier);
21263 return 0;
21264 }
21265diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21266index be25734..87fe232 100644
21267--- a/arch/x86/kernel/time.c
21268+++ b/arch/x86/kernel/time.c
21269@@ -26,17 +26,13 @@
21270 int timer_ack;
21271 #endif
21272
21273-#ifdef CONFIG_X86_64
21274-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
21275-#endif
21276-
21277 unsigned long profile_pc(struct pt_regs *regs)
21278 {
21279 unsigned long pc = instruction_pointer(regs);
21280
21281- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21282+ if (!user_mode(regs) && in_lock_functions(pc)) {
21283 #ifdef CONFIG_FRAME_POINTER
21284- return *(unsigned long *)(regs->bp + sizeof(long));
21285+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21286 #else
21287 unsigned long *sp =
21288 (unsigned long *)kernel_stack_pointer(regs);
21289@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21290 * or above a saved flags. Eflags has bits 22-31 zero,
21291 * kernel addresses don't.
21292 */
21293+
21294+#ifdef CONFIG_PAX_KERNEXEC
21295+ return ktla_ktva(sp[0]);
21296+#else
21297 if (sp[0] >> 22)
21298 return sp[0];
21299 if (sp[1] >> 22)
21300 return sp[1];
21301 #endif
21302+
21303+#endif
21304 }
21305 return pc;
21306 }
21307diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21308index 6bb7b85..dd853e1 100644
21309--- a/arch/x86/kernel/tls.c
21310+++ b/arch/x86/kernel/tls.c
21311@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21312 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21313 return -EINVAL;
21314
21315+#ifdef CONFIG_PAX_SEGMEXEC
21316+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21317+ return -EINVAL;
21318+#endif
21319+
21320 set_tls_desc(p, idx, &info, 1);
21321
21322 return 0;
21323diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
21324index 2f083a2..7d3fecc 100644
21325--- a/arch/x86/kernel/tls.h
21326+++ b/arch/x86/kernel/tls.h
21327@@ -16,6 +16,6 @@
21328
21329 extern user_regset_active_fn regset_tls_active;
21330 extern user_regset_get_fn regset_tls_get;
21331-extern user_regset_set_fn regset_tls_set;
21332+extern user_regset_set_fn regset_tls_set __size_overflow(4);
21333
21334 #endif /* _ARCH_X86_KERNEL_TLS_H */
21335diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
21336index 8508237..229b664 100644
21337--- a/arch/x86/kernel/trampoline_32.S
21338+++ b/arch/x86/kernel/trampoline_32.S
21339@@ -32,6 +32,12 @@
21340 #include <asm/segment.h>
21341 #include <asm/page_types.h>
21342
21343+#ifdef CONFIG_PAX_KERNEXEC
21344+#define ta(X) (X)
21345+#else
21346+#define ta(X) ((X) - __PAGE_OFFSET)
21347+#endif
21348+
21349 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
21350 __CPUINITRODATA
21351 .code16
21352@@ -60,7 +66,7 @@ r_base = .
21353 inc %ax # protected mode (PE) bit
21354 lmsw %ax # into protected mode
21355 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
21356- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
21357+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
21358
21359 # These need to be in the same 64K segment as the above;
21360 # hence we don't use the boot_gdt_descr defined in head.S
21361diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
21362index 3af2dff..ba8aa49 100644
21363--- a/arch/x86/kernel/trampoline_64.S
21364+++ b/arch/x86/kernel/trampoline_64.S
21365@@ -91,7 +91,7 @@ startup_32:
21366 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
21367 movl %eax, %ds
21368
21369- movl $X86_CR4_PAE, %eax
21370+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
21371 movl %eax, %cr4 # Enable PAE mode
21372
21373 # Setup trampoline 4 level pagetables
21374@@ -127,7 +127,7 @@ startup_64:
21375 no_longmode:
21376 hlt
21377 jmp no_longmode
21378-#include "verify_cpu_64.S"
21379+#include "verify_cpu.S"
21380
21381 # Careful these need to be in the same 64K segment as the above;
21382 tidt:
21383@@ -138,7 +138,7 @@ tidt:
21384 # so the kernel can live anywhere
21385 .balign 4
21386 tgdt:
21387- .short tgdt_end - tgdt # gdt limit
21388+ .short tgdt_end - tgdt - 1 # gdt limit
21389 .long tgdt - r_base
21390 .short 0
21391 .quad 0x00cf9b000000ffff # __KERNEL32_CS
21392diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21393index 7e37dce..ec3f8e5 100644
21394--- a/arch/x86/kernel/traps.c
21395+++ b/arch/x86/kernel/traps.c
21396@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
21397
21398 /* Do we ignore FPU interrupts ? */
21399 char ignore_fpu_irq;
21400-
21401-/*
21402- * The IDT has to be page-aligned to simplify the Pentium
21403- * F0 0F bug workaround.
21404- */
21405-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21406 #endif
21407
21408 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21409@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21410 static inline void
21411 die_if_kernel(const char *str, struct pt_regs *regs, long err)
21412 {
21413- if (!user_mode_vm(regs))
21414+ if (!user_mode(regs))
21415 die(str, regs, err);
21416 }
21417 #endif
21418
21419 static void __kprobes
21420-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21421+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21422 long error_code, siginfo_t *info)
21423 {
21424 struct task_struct *tsk = current;
21425
21426 #ifdef CONFIG_X86_32
21427- if (regs->flags & X86_VM_MASK) {
21428+ if (v8086_mode(regs)) {
21429 /*
21430 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21431 * On nmi (interrupt 2), do_trap should not be called.
21432@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21433 }
21434 #endif
21435
21436- if (!user_mode(regs))
21437+ if (!user_mode_novm(regs))
21438 goto kernel_trap;
21439
21440 #ifdef CONFIG_X86_32
21441@@ -158,7 +152,7 @@ trap_signal:
21442 printk_ratelimit()) {
21443 printk(KERN_INFO
21444 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21445- tsk->comm, tsk->pid, str,
21446+ tsk->comm, task_pid_nr(tsk), str,
21447 regs->ip, regs->sp, error_code);
21448 print_vma_addr(" in ", regs->ip);
21449 printk("\n");
21450@@ -175,8 +169,20 @@ kernel_trap:
21451 if (!fixup_exception(regs)) {
21452 tsk->thread.error_code = error_code;
21453 tsk->thread.trap_no = trapnr;
21454+
21455+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21456+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21457+ str = "PAX: suspicious stack segment fault";
21458+#endif
21459+
21460 die(str, regs, error_code);
21461 }
21462+
21463+#ifdef CONFIG_PAX_REFCOUNT
21464+ if (trapnr == 4)
21465+ pax_report_refcount_overflow(regs);
21466+#endif
21467+
21468 return;
21469
21470 #ifdef CONFIG_X86_32
21471@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
21472 conditional_sti(regs);
21473
21474 #ifdef CONFIG_X86_32
21475- if (regs->flags & X86_VM_MASK)
21476+ if (v8086_mode(regs))
21477 goto gp_in_vm86;
21478 #endif
21479
21480 tsk = current;
21481- if (!user_mode(regs))
21482+ if (!user_mode_novm(regs))
21483 goto gp_in_kernel;
21484
21485+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21486+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21487+ struct mm_struct *mm = tsk->mm;
21488+ unsigned long limit;
21489+
21490+ down_write(&mm->mmap_sem);
21491+ limit = mm->context.user_cs_limit;
21492+ if (limit < TASK_SIZE) {
21493+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21494+ up_write(&mm->mmap_sem);
21495+ return;
21496+ }
21497+ up_write(&mm->mmap_sem);
21498+ }
21499+#endif
21500+
21501 tsk->thread.error_code = error_code;
21502 tsk->thread.trap_no = 13;
21503
21504@@ -305,6 +327,13 @@ gp_in_kernel:
21505 if (notify_die(DIE_GPF, "general protection fault", regs,
21506 error_code, 13, SIGSEGV) == NOTIFY_STOP)
21507 return;
21508+
21509+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21510+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21511+ die("PAX: suspicious general protection fault", regs, error_code);
21512+ else
21513+#endif
21514+
21515 die("general protection fault", regs, error_code);
21516 }
21517
21518@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
21519 dotraplinkage notrace __kprobes void
21520 do_nmi(struct pt_regs *regs, long error_code)
21521 {
21522+
21523+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21524+ if (!user_mode(regs)) {
21525+ unsigned long cs = regs->cs & 0xFFFF;
21526+ unsigned long ip = ktva_ktla(regs->ip);
21527+
21528+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21529+ regs->ip = ip;
21530+ }
21531+#endif
21532+
21533 nmi_enter();
21534
21535 inc_irq_stat(__nmi_count);
21536@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21537 }
21538
21539 #ifdef CONFIG_X86_32
21540- if (regs->flags & X86_VM_MASK)
21541+ if (v8086_mode(regs))
21542 goto debug_vm86;
21543 #endif
21544
21545@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21546 * kernel space (but re-enable TF when returning to user mode).
21547 */
21548 if (condition & DR_STEP) {
21549- if (!user_mode(regs))
21550+ if (!user_mode_novm(regs))
21551 goto clear_TF_reenable;
21552 }
21553
21554@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
21555 * Handle strange cache flush from user space exception
21556 * in all other cases. This is undocumented behaviour.
21557 */
21558- if (regs->flags & X86_VM_MASK) {
21559+ if (v8086_mode(regs)) {
21560 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
21561 return;
21562 }
21563@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
21564 void __math_state_restore(void)
21565 {
21566 struct thread_info *thread = current_thread_info();
21567- struct task_struct *tsk = thread->task;
21568+ struct task_struct *tsk = current;
21569
21570 /*
21571 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
21572@@ -825,8 +865,7 @@ void __math_state_restore(void)
21573 */
21574 asmlinkage void math_state_restore(void)
21575 {
21576- struct thread_info *thread = current_thread_info();
21577- struct task_struct *tsk = thread->task;
21578+ struct task_struct *tsk = current;
21579
21580 if (!tsk_used_math(tsk)) {
21581 local_irq_enable();
21582diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21583new file mode 100644
21584index 0000000..50c5edd
21585--- /dev/null
21586+++ b/arch/x86/kernel/verify_cpu.S
21587@@ -0,0 +1,140 @@
21588+/*
21589+ *
21590+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
21591+ * code has been borrowed from boot/setup.S and was introduced by
21592+ * Andi Kleen.
21593+ *
21594+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21595+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21596+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21597+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
21598+ *
21599+ * This source code is licensed under the GNU General Public License,
21600+ * Version 2. See the file COPYING for more details.
21601+ *
21602+ * This is a common code for verification whether CPU supports
21603+ * long mode and SSE or not. It is not called directly instead this
21604+ * file is included at various places and compiled in that context.
21605+ * This file is expected to run in 32bit code. Currently:
21606+ *
21607+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21608+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
21609+ * arch/x86/kernel/head_32.S: processor startup
21610+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21611+ *
21612+ * verify_cpu, returns the status of longmode and SSE in register %eax.
21613+ * 0: Success 1: Failure
21614+ *
21615+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
21616+ *
21617+ * The caller needs to check for the error code and take the action
21618+ * appropriately. Either display a message or halt.
21619+ */
21620+
21621+#include <asm/cpufeature.h>
21622+#include <asm/msr-index.h>
21623+
21624+verify_cpu:
21625+ pushfl # Save caller passed flags
21626+ pushl $0 # Kill any dangerous flags
21627+ popfl
21628+
21629+ pushfl # standard way to check for cpuid
21630+ popl %eax
21631+ movl %eax,%ebx
21632+ xorl $0x200000,%eax
21633+ pushl %eax
21634+ popfl
21635+ pushfl
21636+ popl %eax
21637+ cmpl %eax,%ebx
21638+ jz verify_cpu_no_longmode # cpu has no cpuid
21639+
21640+ movl $0x0,%eax # See if cpuid 1 is implemented
21641+ cpuid
21642+ cmpl $0x1,%eax
21643+ jb verify_cpu_no_longmode # no cpuid 1
21644+
21645+ xor %di,%di
21646+ cmpl $0x68747541,%ebx # AuthenticAMD
21647+ jnz verify_cpu_noamd
21648+ cmpl $0x69746e65,%edx
21649+ jnz verify_cpu_noamd
21650+ cmpl $0x444d4163,%ecx
21651+ jnz verify_cpu_noamd
21652+ mov $1,%di # cpu is from AMD
21653+ jmp verify_cpu_check
21654+
21655+verify_cpu_noamd:
21656+ cmpl $0x756e6547,%ebx # GenuineIntel?
21657+ jnz verify_cpu_check
21658+ cmpl $0x49656e69,%edx
21659+ jnz verify_cpu_check
21660+ cmpl $0x6c65746e,%ecx
21661+ jnz verify_cpu_check
21662+
21663+ # only call IA32_MISC_ENABLE when:
21664+ # family > 6 || (family == 6 && model >= 0xd)
21665+ movl $0x1, %eax # check CPU family and model
21666+ cpuid
21667+ movl %eax, %ecx
21668+
21669+ andl $0x0ff00f00, %eax # mask family and extended family
21670+ shrl $8, %eax
21671+ cmpl $6, %eax
21672+ ja verify_cpu_clear_xd # family > 6, ok
21673+ jb verify_cpu_check # family < 6, skip
21674+
21675+ andl $0x000f00f0, %ecx # mask model and extended model
21676+ shrl $4, %ecx
21677+ cmpl $0xd, %ecx
21678+ jb verify_cpu_check # family == 6, model < 0xd, skip
21679+
21680+verify_cpu_clear_xd:
21681+ movl $MSR_IA32_MISC_ENABLE, %ecx
21682+ rdmsr
21683+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
21684+ jnc verify_cpu_check # only write MSR if bit was changed
21685+ wrmsr
21686+
21687+verify_cpu_check:
21688+ movl $0x1,%eax # Does the cpu have what it takes
21689+ cpuid
21690+ andl $REQUIRED_MASK0,%edx
21691+ xorl $REQUIRED_MASK0,%edx
21692+ jnz verify_cpu_no_longmode
21693+
21694+ movl $0x80000000,%eax # See if extended cpuid is implemented
21695+ cpuid
21696+ cmpl $0x80000001,%eax
21697+ jb verify_cpu_no_longmode # no extended cpuid
21698+
21699+ movl $0x80000001,%eax # Does the cpu have what it takes
21700+ cpuid
21701+ andl $REQUIRED_MASK1,%edx
21702+ xorl $REQUIRED_MASK1,%edx
21703+ jnz verify_cpu_no_longmode
21704+
21705+verify_cpu_sse_test:
21706+ movl $1,%eax
21707+ cpuid
21708+ andl $SSE_MASK,%edx
21709+ cmpl $SSE_MASK,%edx
21710+ je verify_cpu_sse_ok
21711+ test %di,%di
21712+ jz verify_cpu_no_longmode # only try to force SSE on AMD
21713+ movl $MSR_K7_HWCR,%ecx
21714+ rdmsr
21715+ btr $15,%eax # enable SSE
21716+ wrmsr
21717+ xor %di,%di # don't loop
21718+ jmp verify_cpu_sse_test # try again
21719+
21720+verify_cpu_no_longmode:
21721+ popfl # Restore caller passed flags
21722+ movl $1,%eax
21723+ ret
21724+verify_cpu_sse_ok:
21725+ popfl # Restore caller passed flags
21726+ xorl %eax, %eax
21727+ ret
21728diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
21729deleted file mode 100644
21730index 45b6f8a..0000000
21731--- a/arch/x86/kernel/verify_cpu_64.S
21732+++ /dev/null
21733@@ -1,105 +0,0 @@
21734-/*
21735- *
21736- * verify_cpu.S - Code for cpu long mode and SSE verification. This
21737- * code has been borrowed from boot/setup.S and was introduced by
21738- * Andi Kleen.
21739- *
21740- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21741- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21742- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21743- *
21744- * This source code is licensed under the GNU General Public License,
21745- * Version 2. See the file COPYING for more details.
21746- *
21747- * This is a common code for verification whether CPU supports
21748- * long mode and SSE or not. It is not called directly instead this
21749- * file is included at various places and compiled in that context.
21750- * Following are the current usage.
21751- *
21752- * This file is included by both 16bit and 32bit code.
21753- *
21754- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
21755- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
21756- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
21757- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
21758- *
21759- * verify_cpu, returns the status of cpu check in register %eax.
21760- * 0: Success 1: Failure
21761- *
21762- * The caller needs to check for the error code and take the action
21763- * appropriately. Either display a message or halt.
21764- */
21765-
21766-#include <asm/cpufeature.h>
21767-
21768-verify_cpu:
21769- pushfl # Save caller passed flags
21770- pushl $0 # Kill any dangerous flags
21771- popfl
21772-
21773- pushfl # standard way to check for cpuid
21774- popl %eax
21775- movl %eax,%ebx
21776- xorl $0x200000,%eax
21777- pushl %eax
21778- popfl
21779- pushfl
21780- popl %eax
21781- cmpl %eax,%ebx
21782- jz verify_cpu_no_longmode # cpu has no cpuid
21783-
21784- movl $0x0,%eax # See if cpuid 1 is implemented
21785- cpuid
21786- cmpl $0x1,%eax
21787- jb verify_cpu_no_longmode # no cpuid 1
21788-
21789- xor %di,%di
21790- cmpl $0x68747541,%ebx # AuthenticAMD
21791- jnz verify_cpu_noamd
21792- cmpl $0x69746e65,%edx
21793- jnz verify_cpu_noamd
21794- cmpl $0x444d4163,%ecx
21795- jnz verify_cpu_noamd
21796- mov $1,%di # cpu is from AMD
21797-
21798-verify_cpu_noamd:
21799- movl $0x1,%eax # Does the cpu have what it takes
21800- cpuid
21801- andl $REQUIRED_MASK0,%edx
21802- xorl $REQUIRED_MASK0,%edx
21803- jnz verify_cpu_no_longmode
21804-
21805- movl $0x80000000,%eax # See if extended cpuid is implemented
21806- cpuid
21807- cmpl $0x80000001,%eax
21808- jb verify_cpu_no_longmode # no extended cpuid
21809-
21810- movl $0x80000001,%eax # Does the cpu have what it takes
21811- cpuid
21812- andl $REQUIRED_MASK1,%edx
21813- xorl $REQUIRED_MASK1,%edx
21814- jnz verify_cpu_no_longmode
21815-
21816-verify_cpu_sse_test:
21817- movl $1,%eax
21818- cpuid
21819- andl $SSE_MASK,%edx
21820- cmpl $SSE_MASK,%edx
21821- je verify_cpu_sse_ok
21822- test %di,%di
21823- jz verify_cpu_no_longmode # only try to force SSE on AMD
21824- movl $0xc0010015,%ecx # HWCR
21825- rdmsr
21826- btr $15,%eax # enable SSE
21827- wrmsr
21828- xor %di,%di # don't loop
21829- jmp verify_cpu_sse_test # try again
21830-
21831-verify_cpu_no_longmode:
21832- popfl # Restore caller passed flags
21833- movl $1,%eax
21834- ret
21835-verify_cpu_sse_ok:
21836- popfl # Restore caller passed flags
21837- xorl %eax, %eax
21838- ret
21839diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21840index 9c4e625..e9bb4ed 100644
21841--- a/arch/x86/kernel/vm86_32.c
21842+++ b/arch/x86/kernel/vm86_32.c
21843@@ -41,6 +41,7 @@
21844 #include <linux/ptrace.h>
21845 #include <linux/audit.h>
21846 #include <linux/stddef.h>
21847+#include <linux/grsecurity.h>
21848
21849 #include <asm/uaccess.h>
21850 #include <asm/io.h>
21851@@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
21852 /* convert vm86_regs to kernel_vm86_regs */
21853 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21854 const struct vm86_regs __user *user,
21855+ unsigned extra) __size_overflow(3);
21856+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21857+ const struct vm86_regs __user *user,
21858 unsigned extra)
21859 {
21860 int ret = 0;
21861@@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21862 do_exit(SIGSEGV);
21863 }
21864
21865- tss = &per_cpu(init_tss, get_cpu());
21866+ tss = init_tss + get_cpu();
21867 current->thread.sp0 = current->thread.saved_sp0;
21868 current->thread.sysenter_cs = __KERNEL_CS;
21869 load_sp0(tss, &current->thread);
21870@@ -208,6 +212,13 @@ int sys_vm86old(struct pt_regs *regs)
21871 struct task_struct *tsk;
21872 int tmp, ret = -EPERM;
21873
21874+#ifdef CONFIG_GRKERNSEC_VM86
21875+ if (!capable(CAP_SYS_RAWIO)) {
21876+ gr_handle_vm86();
21877+ goto out;
21878+ }
21879+#endif
21880+
21881 tsk = current;
21882 if (tsk->thread.saved_sp0)
21883 goto out;
21884@@ -238,6 +249,14 @@ int sys_vm86(struct pt_regs *regs)
21885 int tmp, ret;
21886 struct vm86plus_struct __user *v86;
21887
21888+#ifdef CONFIG_GRKERNSEC_VM86
21889+ if (!capable(CAP_SYS_RAWIO)) {
21890+ gr_handle_vm86();
21891+ ret = -EPERM;
21892+ goto out;
21893+ }
21894+#endif
21895+
21896 tsk = current;
21897 switch (regs->bx) {
21898 case VM86_REQUEST_IRQ:
21899@@ -324,7 +343,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21900 tsk->thread.saved_fs = info->regs32->fs;
21901 tsk->thread.saved_gs = get_user_gs(info->regs32);
21902
21903- tss = &per_cpu(init_tss, get_cpu());
21904+ tss = init_tss + get_cpu();
21905 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21906 if (cpu_has_sep)
21907 tsk->thread.sysenter_cs = 0;
21908@@ -529,7 +548,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21909 goto cannot_handle;
21910 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21911 goto cannot_handle;
21912- intr_ptr = (unsigned long __user *) (i << 2);
21913+ intr_ptr = (__force unsigned long __user *) (i << 2);
21914 if (get_user(segoffs, intr_ptr))
21915 goto cannot_handle;
21916 if ((segoffs >> 16) == BIOSSEG)
21917diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
21918index d430e4c..831f817 100644
21919--- a/arch/x86/kernel/vmi_32.c
21920+++ b/arch/x86/kernel/vmi_32.c
21921@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
21922 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
21923
21924 #define call_vrom_func(rom,func) \
21925- (((VROMFUNC *)(rom->func))())
21926+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
21927
21928 #define call_vrom_long_func(rom,func,arg) \
21929- (((VROMLONGFUNC *)(rom->func)) (arg))
21930+({\
21931+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
21932+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
21933+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
21934+ __reloc;\
21935+})
21936
21937-static struct vrom_header *vmi_rom;
21938+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
21939 static int disable_pge;
21940 static int disable_pse;
21941 static int disable_sep;
21942@@ -76,10 +81,10 @@ static struct {
21943 void (*set_initial_ap_state)(int, int);
21944 void (*halt)(void);
21945 void (*set_lazy_mode)(int mode);
21946-} vmi_ops;
21947+} __no_const vmi_ops __read_only;
21948
21949 /* Cached VMI operations */
21950-struct vmi_timer_ops vmi_timer_ops;
21951+struct vmi_timer_ops vmi_timer_ops __read_only;
21952
21953 /*
21954 * VMI patching routines.
21955@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
21956 static inline void patch_offset(void *insnbuf,
21957 unsigned long ip, unsigned long dest)
21958 {
21959- *(unsigned long *)(insnbuf+1) = dest-ip-5;
21960+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
21961 }
21962
21963 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21964@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21965 {
21966 u64 reloc;
21967 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
21968+
21969 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
21970 switch(rel->type) {
21971 case VMI_RELOCATION_CALL_REL:
21972@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
21973
21974 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
21975 {
21976- const pte_t pte = { .pte = 0 };
21977+ const pte_t pte = __pte(0ULL);
21978 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
21979 }
21980
21981 static void vmi_pmd_clear(pmd_t *pmd)
21982 {
21983- const pte_t pte = { .pte = 0 };
21984+ const pte_t pte = __pte(0ULL);
21985 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
21986 }
21987 #endif
21988@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
21989 ap.ss = __KERNEL_DS;
21990 ap.esp = (unsigned long) start_esp;
21991
21992- ap.ds = __USER_DS;
21993- ap.es = __USER_DS;
21994+ ap.ds = __KERNEL_DS;
21995+ ap.es = __KERNEL_DS;
21996 ap.fs = __KERNEL_PERCPU;
21997- ap.gs = __KERNEL_STACK_CANARY;
21998+ savesegment(gs, ap.gs);
21999
22000 ap.eflags = 0;
22001
22002@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
22003 paravirt_leave_lazy_mmu();
22004 }
22005
22006+#ifdef CONFIG_PAX_KERNEXEC
22007+static unsigned long vmi_pax_open_kernel(void)
22008+{
22009+ return 0;
22010+}
22011+
22012+static unsigned long vmi_pax_close_kernel(void)
22013+{
22014+ return 0;
22015+}
22016+#endif
22017+
22018 static inline int __init check_vmi_rom(struct vrom_header *rom)
22019 {
22020 struct pci_header *pci;
22021@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
22022 return 0;
22023 if (rom->vrom_signature != VMI_SIGNATURE)
22024 return 0;
22025+ if (rom->rom_length * 512 > sizeof(*rom)) {
22026+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
22027+ return 0;
22028+ }
22029 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
22030 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
22031 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
22032@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
22033 struct vrom_header *romstart;
22034 romstart = (struct vrom_header *)isa_bus_to_virt(base);
22035 if (check_vmi_rom(romstart)) {
22036- vmi_rom = romstart;
22037+ vmi_rom = *romstart;
22038 return 1;
22039 }
22040 }
22041@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
22042
22043 para_fill(pv_irq_ops.safe_halt, Halt);
22044
22045+#ifdef CONFIG_PAX_KERNEXEC
22046+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
22047+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
22048+#endif
22049+
22050 /*
22051 * Alternative instruction rewriting doesn't happen soon enough
22052 * to convert VMI_IRET to a call instead of a jump; so we have
22053@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
22054
22055 void __init vmi_init(void)
22056 {
22057- if (!vmi_rom)
22058+ if (!vmi_rom.rom_signature)
22059 probe_vmi_rom();
22060 else
22061- check_vmi_rom(vmi_rom);
22062+ check_vmi_rom(&vmi_rom);
22063
22064 /* In case probing for or validating the ROM failed, basil */
22065- if (!vmi_rom)
22066+ if (!vmi_rom.rom_signature)
22067 return;
22068
22069- reserve_top_address(-vmi_rom->virtual_top);
22070+ reserve_top_address(-vmi_rom.virtual_top);
22071
22072 #ifdef CONFIG_X86_IO_APIC
22073 /* This is virtual hardware; timer routing is wired correctly */
22074@@ -874,7 +901,7 @@ void __init vmi_activate(void)
22075 {
22076 unsigned long flags;
22077
22078- if (!vmi_rom)
22079+ if (!vmi_rom.rom_signature)
22080 return;
22081
22082 local_irq_save(flags);
22083diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
22084index 3c68fe2..12c8280 100644
22085--- a/arch/x86/kernel/vmlinux.lds.S
22086+++ b/arch/x86/kernel/vmlinux.lds.S
22087@@ -26,6 +26,13 @@
22088 #include <asm/page_types.h>
22089 #include <asm/cache.h>
22090 #include <asm/boot.h>
22091+#include <asm/segment.h>
22092+
22093+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22094+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
22095+#else
22096+#define __KERNEL_TEXT_OFFSET 0
22097+#endif
22098
22099 #undef i386 /* in case the preprocessor is a 32bit one */
22100
22101@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
22102 #ifdef CONFIG_X86_32
22103 OUTPUT_ARCH(i386)
22104 ENTRY(phys_startup_32)
22105-jiffies = jiffies_64;
22106 #else
22107 OUTPUT_ARCH(i386:x86-64)
22108 ENTRY(phys_startup_64)
22109-jiffies_64 = jiffies;
22110 #endif
22111
22112 PHDRS {
22113 text PT_LOAD FLAGS(5); /* R_E */
22114- data PT_LOAD FLAGS(7); /* RWE */
22115+#ifdef CONFIG_X86_32
22116+ module PT_LOAD FLAGS(5); /* R_E */
22117+#endif
22118+#ifdef CONFIG_XEN
22119+ rodata PT_LOAD FLAGS(5); /* R_E */
22120+#else
22121+ rodata PT_LOAD FLAGS(4); /* R__ */
22122+#endif
22123+ data PT_LOAD FLAGS(6); /* RW_ */
22124 #ifdef CONFIG_X86_64
22125 user PT_LOAD FLAGS(5); /* R_E */
22126+#endif
22127+ init.begin PT_LOAD FLAGS(6); /* RW_ */
22128 #ifdef CONFIG_SMP
22129 percpu PT_LOAD FLAGS(6); /* RW_ */
22130 #endif
22131+ text.init PT_LOAD FLAGS(5); /* R_E */
22132+ text.exit PT_LOAD FLAGS(5); /* R_E */
22133 init PT_LOAD FLAGS(7); /* RWE */
22134-#endif
22135 note PT_NOTE FLAGS(0); /* ___ */
22136 }
22137
22138 SECTIONS
22139 {
22140 #ifdef CONFIG_X86_32
22141- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
22142- phys_startup_32 = startup_32 - LOAD_OFFSET;
22143+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
22144 #else
22145- . = __START_KERNEL;
22146- phys_startup_64 = startup_64 - LOAD_OFFSET;
22147+ . = __START_KERNEL;
22148 #endif
22149
22150 /* Text and read-only data */
22151- .text : AT(ADDR(.text) - LOAD_OFFSET) {
22152- _text = .;
22153+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22154 /* bootstrapping code */
22155+#ifdef CONFIG_X86_32
22156+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22157+#else
22158+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22159+#endif
22160+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22161+ _text = .;
22162 HEAD_TEXT
22163 #ifdef CONFIG_X86_32
22164 . = ALIGN(PAGE_SIZE);
22165@@ -82,28 +102,71 @@ SECTIONS
22166 IRQENTRY_TEXT
22167 *(.fixup)
22168 *(.gnu.warning)
22169- /* End of text section */
22170- _etext = .;
22171 } :text = 0x9090
22172
22173- NOTES :text :note
22174+ . += __KERNEL_TEXT_OFFSET;
22175
22176- EXCEPTION_TABLE(16) :text = 0x9090
22177+#ifdef CONFIG_X86_32
22178+ . = ALIGN(PAGE_SIZE);
22179+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
22180+ *(.vmi.rom)
22181+ } :module
22182+
22183+ . = ALIGN(PAGE_SIZE);
22184+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
22185+
22186+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
22187+ MODULES_EXEC_VADDR = .;
22188+ BYTE(0)
22189+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
22190+ . = ALIGN(HPAGE_SIZE);
22191+ MODULES_EXEC_END = . - 1;
22192+#endif
22193+
22194+ } :module
22195+#endif
22196+
22197+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
22198+ /* End of text section */
22199+ _etext = . - __KERNEL_TEXT_OFFSET;
22200+ }
22201+
22202+#ifdef CONFIG_X86_32
22203+ . = ALIGN(PAGE_SIZE);
22204+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
22205+ *(.idt)
22206+ . = ALIGN(PAGE_SIZE);
22207+ *(.empty_zero_page)
22208+ *(.swapper_pg_fixmap)
22209+ *(.swapper_pg_pmd)
22210+ *(.swapper_pg_dir)
22211+ *(.trampoline_pg_dir)
22212+ } :rodata
22213+#endif
22214+
22215+ . = ALIGN(PAGE_SIZE);
22216+ NOTES :rodata :note
22217+
22218+ EXCEPTION_TABLE(16) :rodata
22219
22220 RO_DATA(PAGE_SIZE)
22221
22222 /* Data */
22223 .data : AT(ADDR(.data) - LOAD_OFFSET) {
22224+
22225+#ifdef CONFIG_PAX_KERNEXEC
22226+ . = ALIGN(HPAGE_SIZE);
22227+#else
22228+ . = ALIGN(PAGE_SIZE);
22229+#endif
22230+
22231 /* Start of data section */
22232 _sdata = .;
22233
22234 /* init_task */
22235 INIT_TASK_DATA(THREAD_SIZE)
22236
22237-#ifdef CONFIG_X86_32
22238- /* 32 bit has nosave before _edata */
22239 NOSAVE_DATA
22240-#endif
22241
22242 PAGE_ALIGNED_DATA(PAGE_SIZE)
22243
22244@@ -112,6 +175,8 @@ SECTIONS
22245 DATA_DATA
22246 CONSTRUCTORS
22247
22248+ jiffies = jiffies_64;
22249+
22250 /* rarely changed data like cpu maps */
22251 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
22252
22253@@ -166,12 +231,6 @@ SECTIONS
22254 }
22255 vgetcpu_mode = VVIRT(.vgetcpu_mode);
22256
22257- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
22258- .jiffies : AT(VLOAD(.jiffies)) {
22259- *(.jiffies)
22260- }
22261- jiffies = VVIRT(.jiffies);
22262-
22263 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
22264 *(.vsyscall_3)
22265 }
22266@@ -187,12 +246,19 @@ SECTIONS
22267 #endif /* CONFIG_X86_64 */
22268
22269 /* Init code and data - will be freed after init */
22270- . = ALIGN(PAGE_SIZE);
22271 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
22272+ BYTE(0)
22273+
22274+#ifdef CONFIG_PAX_KERNEXEC
22275+ . = ALIGN(HPAGE_SIZE);
22276+#else
22277+ . = ALIGN(PAGE_SIZE);
22278+#endif
22279+
22280 __init_begin = .; /* paired with __init_end */
22281- }
22282+ } :init.begin
22283
22284-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
22285+#ifdef CONFIG_SMP
22286 /*
22287 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
22288 * output PHDR, so the next output section - .init.text - should
22289@@ -201,12 +267,27 @@ SECTIONS
22290 PERCPU_VADDR(0, :percpu)
22291 #endif
22292
22293- INIT_TEXT_SECTION(PAGE_SIZE)
22294-#ifdef CONFIG_X86_64
22295- :init
22296-#endif
22297+ . = ALIGN(PAGE_SIZE);
22298+ init_begin = .;
22299+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
22300+ VMLINUX_SYMBOL(_sinittext) = .;
22301+ INIT_TEXT
22302+ VMLINUX_SYMBOL(_einittext) = .;
22303+ . = ALIGN(PAGE_SIZE);
22304+ } :text.init
22305
22306- INIT_DATA_SECTION(16)
22307+ /*
22308+ * .exit.text is discard at runtime, not link time, to deal with
22309+ * references from .altinstructions and .eh_frame
22310+ */
22311+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22312+ EXIT_TEXT
22313+ . = ALIGN(16);
22314+ } :text.exit
22315+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
22316+
22317+ . = ALIGN(PAGE_SIZE);
22318+ INIT_DATA_SECTION(16) :init
22319
22320 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
22321 __x86_cpu_dev_start = .;
22322@@ -232,19 +313,11 @@ SECTIONS
22323 *(.altinstr_replacement)
22324 }
22325
22326- /*
22327- * .exit.text is discard at runtime, not link time, to deal with
22328- * references from .altinstructions and .eh_frame
22329- */
22330- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
22331- EXIT_TEXT
22332- }
22333-
22334 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
22335 EXIT_DATA
22336 }
22337
22338-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
22339+#ifndef CONFIG_SMP
22340 PERCPU(PAGE_SIZE)
22341 #endif
22342
22343@@ -267,12 +340,6 @@ SECTIONS
22344 . = ALIGN(PAGE_SIZE);
22345 }
22346
22347-#ifdef CONFIG_X86_64
22348- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
22349- NOSAVE_DATA
22350- }
22351-#endif
22352-
22353 /* BSS */
22354 . = ALIGN(PAGE_SIZE);
22355 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
22356@@ -288,6 +355,7 @@ SECTIONS
22357 __brk_base = .;
22358 . += 64 * 1024; /* 64k alignment slop space */
22359 *(.brk_reservation) /* areas brk users have reserved */
22360+ . = ALIGN(HPAGE_SIZE);
22361 __brk_limit = .;
22362 }
22363
22364@@ -316,13 +384,12 @@ SECTIONS
22365 * for the boot processor.
22366 */
22367 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
22368-INIT_PER_CPU(gdt_page);
22369 INIT_PER_CPU(irq_stack_union);
22370
22371 /*
22372 * Build-time check on the image size:
22373 */
22374-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
22375+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
22376 "kernel image bigger than KERNEL_IMAGE_SIZE");
22377
22378 #ifdef CONFIG_SMP
22379diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
22380index 62f39d7..3bc46a1 100644
22381--- a/arch/x86/kernel/vsyscall_64.c
22382+++ b/arch/x86/kernel/vsyscall_64.c
22383@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
22384
22385 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
22386 /* copy vsyscall data */
22387+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
22388 vsyscall_gtod_data.clock.vread = clock->vread;
22389 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
22390 vsyscall_gtod_data.clock.mask = clock->mask;
22391@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
22392 We do this here because otherwise user space would do it on
22393 its own in a likely inferior way (no access to jiffies).
22394 If you don't like it pass NULL. */
22395- if (tcache && tcache->blob[0] == (j = __jiffies)) {
22396+ if (tcache && tcache->blob[0] == (j = jiffies)) {
22397 p = tcache->blob[1];
22398 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
22399 /* Load per CPU data from RDTSCP */
22400diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
22401index 3909e3b..5433a97 100644
22402--- a/arch/x86/kernel/x8664_ksyms_64.c
22403+++ b/arch/x86/kernel/x8664_ksyms_64.c
22404@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
22405
22406 EXPORT_SYMBOL(copy_user_generic);
22407 EXPORT_SYMBOL(__copy_user_nocache);
22408-EXPORT_SYMBOL(copy_from_user);
22409-EXPORT_SYMBOL(copy_to_user);
22410 EXPORT_SYMBOL(__copy_from_user_inatomic);
22411
22412 EXPORT_SYMBOL(copy_page);
22413diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
22414index c5ee17e..d63218f 100644
22415--- a/arch/x86/kernel/xsave.c
22416+++ b/arch/x86/kernel/xsave.c
22417@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
22418 fx_sw_user->xstate_size > fx_sw_user->extended_size)
22419 return -1;
22420
22421- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
22422+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
22423 fx_sw_user->extended_size -
22424 FP_XSTATE_MAGIC2_SIZE));
22425 /*
22426@@ -196,7 +196,7 @@ fx_only:
22427 * the other extended state.
22428 */
22429 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
22430- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
22431+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
22432 }
22433
22434 /*
22435@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
22436 if (task_thread_info(tsk)->status & TS_XSAVE)
22437 err = restore_user_xstate(buf);
22438 else
22439- err = fxrstor_checking((__force struct i387_fxsave_struct *)
22440+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
22441 buf);
22442 if (unlikely(err)) {
22443 /*
22444diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
22445index 1350e43..a94b011 100644
22446--- a/arch/x86/kvm/emulate.c
22447+++ b/arch/x86/kvm/emulate.c
22448@@ -81,8 +81,8 @@
22449 #define Src2CL (1<<29)
22450 #define Src2ImmByte (2<<29)
22451 #define Src2One (3<<29)
22452-#define Src2Imm16 (4<<29)
22453-#define Src2Mask (7<<29)
22454+#define Src2Imm16 (4U<<29)
22455+#define Src2Mask (7U<<29)
22456
22457 enum {
22458 Group1_80, Group1_81, Group1_82, Group1_83,
22459@@ -411,6 +411,7 @@ static u32 group2_table[] = {
22460
22461 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
22462 do { \
22463+ unsigned long _tmp; \
22464 __asm__ __volatile__ ( \
22465 _PRE_EFLAGS("0", "4", "2") \
22466 _op _suffix " %"_x"3,%1; " \
22467@@ -424,8 +425,6 @@ static u32 group2_table[] = {
22468 /* Raw emulation: instruction has two explicit operands. */
22469 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
22470 do { \
22471- unsigned long _tmp; \
22472- \
22473 switch ((_dst).bytes) { \
22474 case 2: \
22475 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
22476@@ -441,7 +440,6 @@ static u32 group2_table[] = {
22477
22478 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
22479 do { \
22480- unsigned long _tmp; \
22481 switch ((_dst).bytes) { \
22482 case 1: \
22483 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
22484diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
22485index 8dfeaaa..4daa395 100644
22486--- a/arch/x86/kvm/lapic.c
22487+++ b/arch/x86/kvm/lapic.c
22488@@ -52,7 +52,7 @@
22489 #define APIC_BUS_CYCLE_NS 1
22490
22491 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
22492-#define apic_debug(fmt, arg...)
22493+#define apic_debug(fmt, arg...) do {} while (0)
22494
22495 #define APIC_LVT_NUM 6
22496 /* 14 is the version for Xeon and Pentium 8.4.8*/
22497diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
22498index 3bc2707..dd157e2 100644
22499--- a/arch/x86/kvm/paging_tmpl.h
22500+++ b/arch/x86/kvm/paging_tmpl.h
22501@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22502 int level = PT_PAGE_TABLE_LEVEL;
22503 unsigned long mmu_seq;
22504
22505+ pax_track_stack();
22506+
22507 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
22508 kvm_mmu_audit(vcpu, "pre page fault");
22509
22510@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22511 kvm_mmu_free_some_pages(vcpu);
22512 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
22513 level, &write_pt, pfn);
22514+ (void)sptep;
22515 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
22516 sptep, *sptep, write_pt);
22517
22518diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
22519index 7c6e63e..1b7dac1 100644
22520--- a/arch/x86/kvm/svm.c
22521+++ b/arch/x86/kvm/svm.c
22522@@ -2240,6 +2240,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
22523 return 1;
22524 }
22525
22526+static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) __size_overflow(3);
22527 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
22528 {
22529 struct vcpu_svm *svm = to_svm(vcpu);
22530@@ -2486,7 +2487,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
22531 int cpu = raw_smp_processor_id();
22532
22533 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
22534+
22535+ pax_open_kernel();
22536 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
22537+ pax_close_kernel();
22538+
22539 load_TR_desc();
22540 }
22541
22542@@ -2947,7 +2952,7 @@ static bool svm_gb_page_enable(void)
22543 return true;
22544 }
22545
22546-static struct kvm_x86_ops svm_x86_ops = {
22547+static const struct kvm_x86_ops svm_x86_ops = {
22548 .cpu_has_kvm_support = has_svm,
22549 .disabled_by_bios = is_disabled,
22550 .hardware_setup = svm_hardware_setup,
22551diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
22552index e6d925f..8cdd779 100644
22553--- a/arch/x86/kvm/vmx.c
22554+++ b/arch/x86/kvm/vmx.c
22555@@ -570,7 +570,11 @@ static void reload_tss(void)
22556
22557 kvm_get_gdt(&gdt);
22558 descs = (void *)gdt.base;
22559+
22560+ pax_open_kernel();
22561 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
22562+ pax_close_kernel();
22563+
22564 load_TR_desc();
22565 }
22566
22567@@ -1035,6 +1039,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
22568 * Returns 0 on success, non-0 otherwise.
22569 * Assumes vcpu_load() was already called.
22570 */
22571+static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) __size_overflow(3);
22572 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
22573 {
22574 struct vcpu_vmx *vmx = to_vmx(vcpu);
22575@@ -1410,8 +1415,11 @@ static __init int hardware_setup(void)
22576 if (!cpu_has_vmx_flexpriority())
22577 flexpriority_enabled = 0;
22578
22579- if (!cpu_has_vmx_tpr_shadow())
22580- kvm_x86_ops->update_cr8_intercept = NULL;
22581+ if (!cpu_has_vmx_tpr_shadow()) {
22582+ pax_open_kernel();
22583+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
22584+ pax_close_kernel();
22585+ }
22586
22587 if (enable_ept && !cpu_has_vmx_ept_2m_page())
22588 kvm_disable_largepages();
22589@@ -2362,7 +2370,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
22590 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
22591
22592 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
22593- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
22594+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
22595 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
22596 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
22597 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
22598@@ -3718,6 +3726,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22599 "jmp .Lkvm_vmx_return \n\t"
22600 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
22601 ".Lkvm_vmx_return: "
22602+
22603+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22604+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
22605+ ".Lkvm_vmx_return2: "
22606+#endif
22607+
22608 /* Save guest registers, load host registers, keep flags */
22609 "xchg %0, (%%"R"sp) \n\t"
22610 "mov %%"R"ax, %c[rax](%0) \n\t"
22611@@ -3764,8 +3778,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22612 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
22613 #endif
22614 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
22615+
22616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22617+ ,[cs]"i"(__KERNEL_CS)
22618+#endif
22619+
22620 : "cc", "memory"
22621- , R"bx", R"di", R"si"
22622+ , R"ax", R"bx", R"di", R"si"
22623 #ifdef CONFIG_X86_64
22624 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
22625 #endif
22626@@ -3782,7 +3801,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22627 if (vmx->rmode.irq.pending)
22628 fixup_rmode_irq(vmx);
22629
22630- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
22631+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
22632+
22633+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22634+ loadsegment(fs, __KERNEL_PERCPU);
22635+#endif
22636+
22637+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22638+ __set_fs(current_thread_info()->addr_limit);
22639+#endif
22640+
22641 vmx->launched = 1;
22642
22643 vmx_complete_interrupts(vmx);
22644@@ -3957,7 +3985,7 @@ static bool vmx_gb_page_enable(void)
22645 return false;
22646 }
22647
22648-static struct kvm_x86_ops vmx_x86_ops = {
22649+static const struct kvm_x86_ops vmx_x86_ops = {
22650 .cpu_has_kvm_support = cpu_has_kvm_support,
22651 .disabled_by_bios = vmx_disabled_by_bios,
22652 .hardware_setup = hardware_setup,
22653diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
22654index df1cefb..ff86cc2 100644
22655--- a/arch/x86/kvm/x86.c
22656+++ b/arch/x86/kvm/x86.c
22657@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
22658 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
22659 struct kvm_cpuid_entry2 __user *entries);
22660
22661-struct kvm_x86_ops *kvm_x86_ops;
22662+const struct kvm_x86_ops *kvm_x86_ops;
22663 EXPORT_SYMBOL_GPL(kvm_x86_ops);
22664
22665 int ignore_msrs = 0;
22666@@ -547,6 +547,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
22667 return kvm_set_msr(vcpu, index, *data);
22668 }
22669
22670+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
22671 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
22672 {
22673 int version;
22674@@ -1430,15 +1431,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
22675 struct kvm_cpuid2 *cpuid,
22676 struct kvm_cpuid_entry2 __user *entries)
22677 {
22678- int r;
22679+ int r, i;
22680
22681 r = -E2BIG;
22682 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
22683 goto out;
22684 r = -EFAULT;
22685- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
22686- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22687+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22688 goto out;
22689+ for (i = 0; i < cpuid->nent; ++i) {
22690+ struct kvm_cpuid_entry2 cpuid_entry;
22691+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
22692+ goto out;
22693+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
22694+ }
22695 vcpu->arch.cpuid_nent = cpuid->nent;
22696 kvm_apic_set_version(vcpu);
22697 return 0;
22698@@ -1451,16 +1457,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
22699 struct kvm_cpuid2 *cpuid,
22700 struct kvm_cpuid_entry2 __user *entries)
22701 {
22702- int r;
22703+ int r, i;
22704
22705 vcpu_load(vcpu);
22706 r = -E2BIG;
22707 if (cpuid->nent < vcpu->arch.cpuid_nent)
22708 goto out;
22709 r = -EFAULT;
22710- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
22711- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22712+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22713 goto out;
22714+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
22715+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
22716+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
22717+ goto out;
22718+ }
22719 return 0;
22720
22721 out:
22722@@ -1678,7 +1688,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
22723 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
22724 struct kvm_interrupt *irq)
22725 {
22726- if (irq->irq < 0 || irq->irq >= 256)
22727+ if (irq->irq >= 256)
22728 return -EINVAL;
22729 if (irqchip_in_kernel(vcpu->kvm))
22730 return -ENXIO;
22731@@ -2764,7 +2774,14 @@ int emulator_write_emulated(unsigned long addr,
22732 }
22733 EXPORT_SYMBOL_GPL(emulator_write_emulated);
22734
22735-static int emulator_cmpxchg_emulated(unsigned long addr,
22736+static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
22737+ unsigned long addr,
22738+ const void *old,
22739+ const void *new,
22740+ unsigned int bytes,
22741+ struct kvm_vcpu *vcpu) __size_overflow(5);
22742+static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
22743+ unsigned long addr,
22744 const void *old,
22745 const void *new,
22746 unsigned int bytes,
22747@@ -3260,10 +3277,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
22748 .notifier_call = kvmclock_cpufreq_notifier
22749 };
22750
22751-int kvm_arch_init(void *opaque)
22752+int kvm_arch_init(const void *opaque)
22753 {
22754 int r, cpu;
22755- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
22756+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
22757
22758 if (kvm_x86_ops) {
22759 printk(KERN_ERR "kvm: already loaded the other module\n");
22760diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22761index 7e59dc1..b88c98f 100644
22762--- a/arch/x86/lguest/boot.c
22763+++ b/arch/x86/lguest/boot.c
22764@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22765 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22766 * Launcher to reboot us.
22767 */
22768-static void lguest_restart(char *reason)
22769+static __noreturn void lguest_restart(char *reason)
22770 {
22771 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
22772+ BUG();
22773 }
22774
22775 /*G:050
22776diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
22777index 824fa0b..c619e96 100644
22778--- a/arch/x86/lib/atomic64_32.c
22779+++ b/arch/x86/lib/atomic64_32.c
22780@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
22781 }
22782 EXPORT_SYMBOL(atomic64_cmpxchg);
22783
22784+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
22785+{
22786+ return cmpxchg8b(&ptr->counter, old_val, new_val);
22787+}
22788+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
22789+
22790 /**
22791 * atomic64_xchg - xchg atomic64 variable
22792 * @ptr: pointer to type atomic64_t
22793@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
22794 EXPORT_SYMBOL(atomic64_xchg);
22795
22796 /**
22797+ * atomic64_xchg_unchecked - xchg atomic64 variable
22798+ * @ptr: pointer to type atomic64_unchecked_t
22799+ * @new_val: value to assign
22800+ *
22801+ * Atomically xchgs the value of @ptr to @new_val and returns
22802+ * the old value.
22803+ */
22804+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22805+{
22806+ /*
22807+ * Try first with a (possibly incorrect) assumption about
22808+ * what we have there. We'll do two loops most likely,
22809+ * but we'll get an ownership MESI transaction straight away
22810+ * instead of a read transaction followed by a
22811+ * flush-for-ownership transaction:
22812+ */
22813+ u64 old_val, real_val = 0;
22814+
22815+ do {
22816+ old_val = real_val;
22817+
22818+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22819+
22820+ } while (real_val != old_val);
22821+
22822+ return old_val;
22823+}
22824+EXPORT_SYMBOL(atomic64_xchg_unchecked);
22825+
22826+/**
22827 * atomic64_set - set atomic64 variable
22828 * @ptr: pointer to type atomic64_t
22829 * @new_val: value to assign
22830@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
22831 EXPORT_SYMBOL(atomic64_set);
22832
22833 /**
22834-EXPORT_SYMBOL(atomic64_read);
22835+ * atomic64_unchecked_set - set atomic64 variable
22836+ * @ptr: pointer to type atomic64_unchecked_t
22837+ * @new_val: value to assign
22838+ *
22839+ * Atomically sets the value of @ptr to @new_val.
22840+ */
22841+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22842+{
22843+ atomic64_xchg_unchecked(ptr, new_val);
22844+}
22845+EXPORT_SYMBOL(atomic64_set_unchecked);
22846+
22847+/**
22848 * atomic64_add_return - add and return
22849 * @delta: integer value to add
22850 * @ptr: pointer to type atomic64_t
22851@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
22852 }
22853 EXPORT_SYMBOL(atomic64_add_return);
22854
22855+/**
22856+ * atomic64_add_return_unchecked - add and return
22857+ * @delta: integer value to add
22858+ * @ptr: pointer to type atomic64_unchecked_t
22859+ *
22860+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
22861+ */
22862+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22863+{
22864+ /*
22865+ * Try first with a (possibly incorrect) assumption about
22866+ * what we have there. We'll do two loops most likely,
22867+ * but we'll get an ownership MESI transaction straight away
22868+ * instead of a read transaction followed by a
22869+ * flush-for-ownership transaction:
22870+ */
22871+ u64 old_val, new_val, real_val = 0;
22872+
22873+ do {
22874+ old_val = real_val;
22875+ new_val = old_val + delta;
22876+
22877+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22878+
22879+ } while (real_val != old_val);
22880+
22881+ return new_val;
22882+}
22883+EXPORT_SYMBOL(atomic64_add_return_unchecked);
22884+
22885 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
22886 {
22887 return atomic64_add_return(-delta, ptr);
22888 }
22889 EXPORT_SYMBOL(atomic64_sub_return);
22890
22891+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22892+{
22893+ return atomic64_add_return_unchecked(-delta, ptr);
22894+}
22895+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
22896+
22897 u64 atomic64_inc_return(atomic64_t *ptr)
22898 {
22899 return atomic64_add_return(1, ptr);
22900 }
22901 EXPORT_SYMBOL(atomic64_inc_return);
22902
22903+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
22904+{
22905+ return atomic64_add_return_unchecked(1, ptr);
22906+}
22907+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
22908+
22909 u64 atomic64_dec_return(atomic64_t *ptr)
22910 {
22911 return atomic64_sub_return(1, ptr);
22912 }
22913 EXPORT_SYMBOL(atomic64_dec_return);
22914
22915+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
22916+{
22917+ return atomic64_sub_return_unchecked(1, ptr);
22918+}
22919+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
22920+
22921 /**
22922 * atomic64_add - add integer to atomic64 variable
22923 * @delta: integer value to add
22924@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
22925 EXPORT_SYMBOL(atomic64_add);
22926
22927 /**
22928+ * atomic64_add_unchecked - add integer to atomic64 variable
22929+ * @delta: integer value to add
22930+ * @ptr: pointer to type atomic64_unchecked_t
22931+ *
22932+ * Atomically adds @delta to @ptr.
22933+ */
22934+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22935+{
22936+ atomic64_add_return_unchecked(delta, ptr);
22937+}
22938+EXPORT_SYMBOL(atomic64_add_unchecked);
22939+
22940+/**
22941 * atomic64_sub - subtract the atomic64 variable
22942 * @delta: integer value to subtract
22943 * @ptr: pointer to type atomic64_t
22944@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
22945 EXPORT_SYMBOL(atomic64_sub);
22946
22947 /**
22948+ * atomic64_sub_unchecked - subtract the atomic64 variable
22949+ * @delta: integer value to subtract
22950+ * @ptr: pointer to type atomic64_unchecked_t
22951+ *
22952+ * Atomically subtracts @delta from @ptr.
22953+ */
22954+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22955+{
22956+ atomic64_add_unchecked(-delta, ptr);
22957+}
22958+EXPORT_SYMBOL(atomic64_sub_unchecked);
22959+
22960+/**
22961 * atomic64_sub_and_test - subtract value from variable and test result
22962 * @delta: integer value to subtract
22963 * @ptr: pointer to type atomic64_t
22964@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
22965 EXPORT_SYMBOL(atomic64_inc);
22966
22967 /**
22968+ * atomic64_inc_unchecked - increment atomic64 variable
22969+ * @ptr: pointer to type atomic64_unchecked_t
22970+ *
22971+ * Atomically increments @ptr by 1.
22972+ */
22973+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
22974+{
22975+ atomic64_add_unchecked(1, ptr);
22976+}
22977+EXPORT_SYMBOL(atomic64_inc_unchecked);
22978+
22979+/**
22980 * atomic64_dec - decrement atomic64 variable
22981 * @ptr: pointer to type atomic64_t
22982 *
22983@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
22984 EXPORT_SYMBOL(atomic64_dec);
22985
22986 /**
22987+ * atomic64_dec_unchecked - decrement atomic64 variable
22988+ * @ptr: pointer to type atomic64_unchecked_t
22989+ *
22990+ * Atomically decrements @ptr by 1.
22991+ */
22992+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
22993+{
22994+ atomic64_sub_unchecked(1, ptr);
22995+}
22996+EXPORT_SYMBOL(atomic64_dec_unchecked);
22997+
22998+/**
22999 * atomic64_dec_and_test - decrement and test
23000 * @ptr: pointer to type atomic64_t
23001 *
23002diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
23003index adbccd0..98f96c8 100644
23004--- a/arch/x86/lib/checksum_32.S
23005+++ b/arch/x86/lib/checksum_32.S
23006@@ -28,7 +28,8 @@
23007 #include <linux/linkage.h>
23008 #include <asm/dwarf2.h>
23009 #include <asm/errno.h>
23010-
23011+#include <asm/segment.h>
23012+
23013 /*
23014 * computes a partial checksum, e.g. for TCP/UDP fragments
23015 */
23016@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
23017
23018 #define ARGBASE 16
23019 #define FP 12
23020-
23021-ENTRY(csum_partial_copy_generic)
23022+
23023+ENTRY(csum_partial_copy_generic_to_user)
23024 CFI_STARTPROC
23025+
23026+#ifdef CONFIG_PAX_MEMORY_UDEREF
23027+ pushl %gs
23028+ CFI_ADJUST_CFA_OFFSET 4
23029+ popl %es
23030+ CFI_ADJUST_CFA_OFFSET -4
23031+ jmp csum_partial_copy_generic
23032+#endif
23033+
23034+ENTRY(csum_partial_copy_generic_from_user)
23035+
23036+#ifdef CONFIG_PAX_MEMORY_UDEREF
23037+ pushl %gs
23038+ CFI_ADJUST_CFA_OFFSET 4
23039+ popl %ds
23040+ CFI_ADJUST_CFA_OFFSET -4
23041+#endif
23042+
23043+ENTRY(csum_partial_copy_generic)
23044 subl $4,%esp
23045 CFI_ADJUST_CFA_OFFSET 4
23046 pushl %edi
23047@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
23048 jmp 4f
23049 SRC(1: movw (%esi), %bx )
23050 addl $2, %esi
23051-DST( movw %bx, (%edi) )
23052+DST( movw %bx, %es:(%edi) )
23053 addl $2, %edi
23054 addw %bx, %ax
23055 adcl $0, %eax
23056@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
23057 SRC(1: movl (%esi), %ebx )
23058 SRC( movl 4(%esi), %edx )
23059 adcl %ebx, %eax
23060-DST( movl %ebx, (%edi) )
23061+DST( movl %ebx, %es:(%edi) )
23062 adcl %edx, %eax
23063-DST( movl %edx, 4(%edi) )
23064+DST( movl %edx, %es:4(%edi) )
23065
23066 SRC( movl 8(%esi), %ebx )
23067 SRC( movl 12(%esi), %edx )
23068 adcl %ebx, %eax
23069-DST( movl %ebx, 8(%edi) )
23070+DST( movl %ebx, %es:8(%edi) )
23071 adcl %edx, %eax
23072-DST( movl %edx, 12(%edi) )
23073+DST( movl %edx, %es:12(%edi) )
23074
23075 SRC( movl 16(%esi), %ebx )
23076 SRC( movl 20(%esi), %edx )
23077 adcl %ebx, %eax
23078-DST( movl %ebx, 16(%edi) )
23079+DST( movl %ebx, %es:16(%edi) )
23080 adcl %edx, %eax
23081-DST( movl %edx, 20(%edi) )
23082+DST( movl %edx, %es:20(%edi) )
23083
23084 SRC( movl 24(%esi), %ebx )
23085 SRC( movl 28(%esi), %edx )
23086 adcl %ebx, %eax
23087-DST( movl %ebx, 24(%edi) )
23088+DST( movl %ebx, %es:24(%edi) )
23089 adcl %edx, %eax
23090-DST( movl %edx, 28(%edi) )
23091+DST( movl %edx, %es:28(%edi) )
23092
23093 lea 32(%esi), %esi
23094 lea 32(%edi), %edi
23095@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
23096 shrl $2, %edx # This clears CF
23097 SRC(3: movl (%esi), %ebx )
23098 adcl %ebx, %eax
23099-DST( movl %ebx, (%edi) )
23100+DST( movl %ebx, %es:(%edi) )
23101 lea 4(%esi), %esi
23102 lea 4(%edi), %edi
23103 dec %edx
23104@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
23105 jb 5f
23106 SRC( movw (%esi), %cx )
23107 leal 2(%esi), %esi
23108-DST( movw %cx, (%edi) )
23109+DST( movw %cx, %es:(%edi) )
23110 leal 2(%edi), %edi
23111 je 6f
23112 shll $16,%ecx
23113 SRC(5: movb (%esi), %cl )
23114-DST( movb %cl, (%edi) )
23115+DST( movb %cl, %es:(%edi) )
23116 6: addl %ecx, %eax
23117 adcl $0, %eax
23118 7:
23119@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
23120
23121 6001:
23122 movl ARGBASE+20(%esp), %ebx # src_err_ptr
23123- movl $-EFAULT, (%ebx)
23124+ movl $-EFAULT, %ss:(%ebx)
23125
23126 # zero the complete destination - computing the rest
23127 # is too much work
23128@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
23129
23130 6002:
23131 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23132- movl $-EFAULT,(%ebx)
23133+ movl $-EFAULT,%ss:(%ebx)
23134 jmp 5000b
23135
23136 .previous
23137
23138+ pushl %ss
23139+ CFI_ADJUST_CFA_OFFSET 4
23140+ popl %ds
23141+ CFI_ADJUST_CFA_OFFSET -4
23142+ pushl %ss
23143+ CFI_ADJUST_CFA_OFFSET 4
23144+ popl %es
23145+ CFI_ADJUST_CFA_OFFSET -4
23146 popl %ebx
23147 CFI_ADJUST_CFA_OFFSET -4
23148 CFI_RESTORE ebx
23149@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
23150 CFI_ADJUST_CFA_OFFSET -4
23151 ret
23152 CFI_ENDPROC
23153-ENDPROC(csum_partial_copy_generic)
23154+ENDPROC(csum_partial_copy_generic_to_user)
23155
23156 #else
23157
23158 /* Version for PentiumII/PPro */
23159
23160 #define ROUND1(x) \
23161+ nop; nop; nop; \
23162 SRC(movl x(%esi), %ebx ) ; \
23163 addl %ebx, %eax ; \
23164- DST(movl %ebx, x(%edi) ) ;
23165+ DST(movl %ebx, %es:x(%edi)) ;
23166
23167 #define ROUND(x) \
23168+ nop; nop; nop; \
23169 SRC(movl x(%esi), %ebx ) ; \
23170 adcl %ebx, %eax ; \
23171- DST(movl %ebx, x(%edi) ) ;
23172+ DST(movl %ebx, %es:x(%edi)) ;
23173
23174 #define ARGBASE 12
23175-
23176-ENTRY(csum_partial_copy_generic)
23177+
23178+ENTRY(csum_partial_copy_generic_to_user)
23179 CFI_STARTPROC
23180+
23181+#ifdef CONFIG_PAX_MEMORY_UDEREF
23182+ pushl %gs
23183+ CFI_ADJUST_CFA_OFFSET 4
23184+ popl %es
23185+ CFI_ADJUST_CFA_OFFSET -4
23186+ jmp csum_partial_copy_generic
23187+#endif
23188+
23189+ENTRY(csum_partial_copy_generic_from_user)
23190+
23191+#ifdef CONFIG_PAX_MEMORY_UDEREF
23192+ pushl %gs
23193+ CFI_ADJUST_CFA_OFFSET 4
23194+ popl %ds
23195+ CFI_ADJUST_CFA_OFFSET -4
23196+#endif
23197+
23198+ENTRY(csum_partial_copy_generic)
23199 pushl %ebx
23200 CFI_ADJUST_CFA_OFFSET 4
23201 CFI_REL_OFFSET ebx, 0
23202@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
23203 subl %ebx, %edi
23204 lea -1(%esi),%edx
23205 andl $-32,%edx
23206- lea 3f(%ebx,%ebx), %ebx
23207+ lea 3f(%ebx,%ebx,2), %ebx
23208 testl %esi, %esi
23209 jmp *%ebx
23210 1: addl $64,%esi
23211@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
23212 jb 5f
23213 SRC( movw (%esi), %dx )
23214 leal 2(%esi), %esi
23215-DST( movw %dx, (%edi) )
23216+DST( movw %dx, %es:(%edi) )
23217 leal 2(%edi), %edi
23218 je 6f
23219 shll $16,%edx
23220 5:
23221 SRC( movb (%esi), %dl )
23222-DST( movb %dl, (%edi) )
23223+DST( movb %dl, %es:(%edi) )
23224 6: addl %edx, %eax
23225 adcl $0, %eax
23226 7:
23227 .section .fixup, "ax"
23228 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
23229- movl $-EFAULT, (%ebx)
23230+ movl $-EFAULT, %ss:(%ebx)
23231 # zero the complete destination (computing the rest is too much work)
23232 movl ARGBASE+8(%esp),%edi # dst
23233 movl ARGBASE+12(%esp),%ecx # len
23234@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
23235 rep; stosb
23236 jmp 7b
23237 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23238- movl $-EFAULT, (%ebx)
23239+ movl $-EFAULT, %ss:(%ebx)
23240 jmp 7b
23241 .previous
23242
23243+#ifdef CONFIG_PAX_MEMORY_UDEREF
23244+ pushl %ss
23245+ CFI_ADJUST_CFA_OFFSET 4
23246+ popl %ds
23247+ CFI_ADJUST_CFA_OFFSET -4
23248+ pushl %ss
23249+ CFI_ADJUST_CFA_OFFSET 4
23250+ popl %es
23251+ CFI_ADJUST_CFA_OFFSET -4
23252+#endif
23253+
23254 popl %esi
23255 CFI_ADJUST_CFA_OFFSET -4
23256 CFI_RESTORE esi
23257@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
23258 CFI_RESTORE ebx
23259 ret
23260 CFI_ENDPROC
23261-ENDPROC(csum_partial_copy_generic)
23262+ENDPROC(csum_partial_copy_generic_to_user)
23263
23264 #undef ROUND
23265 #undef ROUND1
23266diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
23267index ebeafcc..1e3a402 100644
23268--- a/arch/x86/lib/clear_page_64.S
23269+++ b/arch/x86/lib/clear_page_64.S
23270@@ -1,5 +1,6 @@
23271 #include <linux/linkage.h>
23272 #include <asm/dwarf2.h>
23273+#include <asm/alternative-asm.h>
23274
23275 /*
23276 * Zero a page.
23277@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
23278 movl $4096/8,%ecx
23279 xorl %eax,%eax
23280 rep stosq
23281+ pax_force_retaddr
23282 ret
23283 CFI_ENDPROC
23284 ENDPROC(clear_page_c)
23285@@ -33,6 +35,7 @@ ENTRY(clear_page)
23286 leaq 64(%rdi),%rdi
23287 jnz .Lloop
23288 nop
23289+ pax_force_retaddr
23290 ret
23291 CFI_ENDPROC
23292 .Lclear_page_end:
23293@@ -43,7 +46,7 @@ ENDPROC(clear_page)
23294
23295 #include <asm/cpufeature.h>
23296
23297- .section .altinstr_replacement,"ax"
23298+ .section .altinstr_replacement,"a"
23299 1: .byte 0xeb /* jmp <disp8> */
23300 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
23301 2:
23302diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
23303index 727a5d4..333818a 100644
23304--- a/arch/x86/lib/copy_page_64.S
23305+++ b/arch/x86/lib/copy_page_64.S
23306@@ -2,12 +2,14 @@
23307
23308 #include <linux/linkage.h>
23309 #include <asm/dwarf2.h>
23310+#include <asm/alternative-asm.h>
23311
23312 ALIGN
23313 copy_page_c:
23314 CFI_STARTPROC
23315 movl $4096/8,%ecx
23316 rep movsq
23317+ pax_force_retaddr
23318 ret
23319 CFI_ENDPROC
23320 ENDPROC(copy_page_c)
23321@@ -38,7 +40,7 @@ ENTRY(copy_page)
23322 movq 16 (%rsi), %rdx
23323 movq 24 (%rsi), %r8
23324 movq 32 (%rsi), %r9
23325- movq 40 (%rsi), %r10
23326+ movq 40 (%rsi), %r13
23327 movq 48 (%rsi), %r11
23328 movq 56 (%rsi), %r12
23329
23330@@ -49,7 +51,7 @@ ENTRY(copy_page)
23331 movq %rdx, 16 (%rdi)
23332 movq %r8, 24 (%rdi)
23333 movq %r9, 32 (%rdi)
23334- movq %r10, 40 (%rdi)
23335+ movq %r13, 40 (%rdi)
23336 movq %r11, 48 (%rdi)
23337 movq %r12, 56 (%rdi)
23338
23339@@ -68,7 +70,7 @@ ENTRY(copy_page)
23340 movq 16 (%rsi), %rdx
23341 movq 24 (%rsi), %r8
23342 movq 32 (%rsi), %r9
23343- movq 40 (%rsi), %r10
23344+ movq 40 (%rsi), %r13
23345 movq 48 (%rsi), %r11
23346 movq 56 (%rsi), %r12
23347
23348@@ -77,7 +79,7 @@ ENTRY(copy_page)
23349 movq %rdx, 16 (%rdi)
23350 movq %r8, 24 (%rdi)
23351 movq %r9, 32 (%rdi)
23352- movq %r10, 40 (%rdi)
23353+ movq %r13, 40 (%rdi)
23354 movq %r11, 48 (%rdi)
23355 movq %r12, 56 (%rdi)
23356
23357@@ -94,6 +96,7 @@ ENTRY(copy_page)
23358 CFI_RESTORE r13
23359 addq $3*8,%rsp
23360 CFI_ADJUST_CFA_OFFSET -3*8
23361+ pax_force_retaddr
23362 ret
23363 .Lcopy_page_end:
23364 CFI_ENDPROC
23365@@ -104,7 +107,7 @@ ENDPROC(copy_page)
23366
23367 #include <asm/cpufeature.h>
23368
23369- .section .altinstr_replacement,"ax"
23370+ .section .altinstr_replacement,"a"
23371 1: .byte 0xeb /* jmp <disp8> */
23372 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
23373 2:
23374diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
23375index af8debd..40c75f3 100644
23376--- a/arch/x86/lib/copy_user_64.S
23377+++ b/arch/x86/lib/copy_user_64.S
23378@@ -15,13 +15,15 @@
23379 #include <asm/asm-offsets.h>
23380 #include <asm/thread_info.h>
23381 #include <asm/cpufeature.h>
23382+#include <asm/pgtable.h>
23383+#include <asm/alternative-asm.h>
23384
23385 .macro ALTERNATIVE_JUMP feature,orig,alt
23386 0:
23387 .byte 0xe9 /* 32bit jump */
23388 .long \orig-1f /* by default jump to orig */
23389 1:
23390- .section .altinstr_replacement,"ax"
23391+ .section .altinstr_replacement,"a"
23392 2: .byte 0xe9 /* near jump with 32bit immediate */
23393 .long \alt-1b /* offset */ /* or alternatively to alt */
23394 .previous
23395@@ -64,55 +66,26 @@
23396 #endif
23397 .endm
23398
23399-/* Standard copy_to_user with segment limit checking */
23400-ENTRY(copy_to_user)
23401- CFI_STARTPROC
23402- GET_THREAD_INFO(%rax)
23403- movq %rdi,%rcx
23404- addq %rdx,%rcx
23405- jc bad_to_user
23406- cmpq TI_addr_limit(%rax),%rcx
23407- ja bad_to_user
23408- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23409- CFI_ENDPROC
23410-ENDPROC(copy_to_user)
23411-
23412-/* Standard copy_from_user with segment limit checking */
23413-ENTRY(copy_from_user)
23414- CFI_STARTPROC
23415- GET_THREAD_INFO(%rax)
23416- movq %rsi,%rcx
23417- addq %rdx,%rcx
23418- jc bad_from_user
23419- cmpq TI_addr_limit(%rax),%rcx
23420- ja bad_from_user
23421- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23422- CFI_ENDPROC
23423-ENDPROC(copy_from_user)
23424-
23425 ENTRY(copy_user_generic)
23426 CFI_STARTPROC
23427 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23428 CFI_ENDPROC
23429 ENDPROC(copy_user_generic)
23430
23431-ENTRY(__copy_from_user_inatomic)
23432- CFI_STARTPROC
23433- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23434- CFI_ENDPROC
23435-ENDPROC(__copy_from_user_inatomic)
23436-
23437 .section .fixup,"ax"
23438 /* must zero dest */
23439 ENTRY(bad_from_user)
23440 bad_from_user:
23441 CFI_STARTPROC
23442+ testl %edx,%edx
23443+ js bad_to_user
23444 movl %edx,%ecx
23445 xorl %eax,%eax
23446 rep
23447 stosb
23448 bad_to_user:
23449 movl %edx,%eax
23450+ pax_force_retaddr
23451 ret
23452 CFI_ENDPROC
23453 ENDPROC(bad_from_user)
23454@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23455 jz 17f
23456 1: movq (%rsi),%r8
23457 2: movq 1*8(%rsi),%r9
23458-3: movq 2*8(%rsi),%r10
23459+3: movq 2*8(%rsi),%rax
23460 4: movq 3*8(%rsi),%r11
23461 5: movq %r8,(%rdi)
23462 6: movq %r9,1*8(%rdi)
23463-7: movq %r10,2*8(%rdi)
23464+7: movq %rax,2*8(%rdi)
23465 8: movq %r11,3*8(%rdi)
23466 9: movq 4*8(%rsi),%r8
23467 10: movq 5*8(%rsi),%r9
23468-11: movq 6*8(%rsi),%r10
23469+11: movq 6*8(%rsi),%rax
23470 12: movq 7*8(%rsi),%r11
23471 13: movq %r8,4*8(%rdi)
23472 14: movq %r9,5*8(%rdi)
23473-15: movq %r10,6*8(%rdi)
23474+15: movq %rax,6*8(%rdi)
23475 16: movq %r11,7*8(%rdi)
23476 leaq 64(%rsi),%rsi
23477 leaq 64(%rdi),%rdi
23478@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
23479 decl %ecx
23480 jnz 21b
23481 23: xor %eax,%eax
23482+ pax_force_retaddr
23483 ret
23484
23485 .section .fixup,"ax"
23486@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
23487 3: rep
23488 movsb
23489 4: xorl %eax,%eax
23490+ pax_force_retaddr
23491 ret
23492
23493 .section .fixup,"ax"
23494diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23495index cb0c112..e3a6895 100644
23496--- a/arch/x86/lib/copy_user_nocache_64.S
23497+++ b/arch/x86/lib/copy_user_nocache_64.S
23498@@ -8,12 +8,14 @@
23499
23500 #include <linux/linkage.h>
23501 #include <asm/dwarf2.h>
23502+#include <asm/alternative-asm.h>
23503
23504 #define FIX_ALIGNMENT 1
23505
23506 #include <asm/current.h>
23507 #include <asm/asm-offsets.h>
23508 #include <asm/thread_info.h>
23509+#include <asm/pgtable.h>
23510
23511 .macro ALIGN_DESTINATION
23512 #ifdef FIX_ALIGNMENT
23513@@ -50,6 +52,15 @@
23514 */
23515 ENTRY(__copy_user_nocache)
23516 CFI_STARTPROC
23517+
23518+#ifdef CONFIG_PAX_MEMORY_UDEREF
23519+ mov $PAX_USER_SHADOW_BASE,%rcx
23520+ cmp %rcx,%rsi
23521+ jae 1f
23522+ add %rcx,%rsi
23523+1:
23524+#endif
23525+
23526 cmpl $8,%edx
23527 jb 20f /* less then 8 bytes, go to byte copy loop */
23528 ALIGN_DESTINATION
23529@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23530 jz 17f
23531 1: movq (%rsi),%r8
23532 2: movq 1*8(%rsi),%r9
23533-3: movq 2*8(%rsi),%r10
23534+3: movq 2*8(%rsi),%rax
23535 4: movq 3*8(%rsi),%r11
23536 5: movnti %r8,(%rdi)
23537 6: movnti %r9,1*8(%rdi)
23538-7: movnti %r10,2*8(%rdi)
23539+7: movnti %rax,2*8(%rdi)
23540 8: movnti %r11,3*8(%rdi)
23541 9: movq 4*8(%rsi),%r8
23542 10: movq 5*8(%rsi),%r9
23543-11: movq 6*8(%rsi),%r10
23544+11: movq 6*8(%rsi),%rax
23545 12: movq 7*8(%rsi),%r11
23546 13: movnti %r8,4*8(%rdi)
23547 14: movnti %r9,5*8(%rdi)
23548-15: movnti %r10,6*8(%rdi)
23549+15: movnti %rax,6*8(%rdi)
23550 16: movnti %r11,7*8(%rdi)
23551 leaq 64(%rsi),%rsi
23552 leaq 64(%rdi),%rdi
23553@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
23554 jnz 21b
23555 23: xorl %eax,%eax
23556 sfence
23557+ pax_force_retaddr
23558 ret
23559
23560 .section .fixup,"ax"
23561diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23562index f0dba36..48cb4d6 100644
23563--- a/arch/x86/lib/csum-copy_64.S
23564+++ b/arch/x86/lib/csum-copy_64.S
23565@@ -8,6 +8,7 @@
23566 #include <linux/linkage.h>
23567 #include <asm/dwarf2.h>
23568 #include <asm/errno.h>
23569+#include <asm/alternative-asm.h>
23570
23571 /*
23572 * Checksum copy with exception handling.
23573@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
23574 CFI_RESTORE rbp
23575 addq $7*8,%rsp
23576 CFI_ADJUST_CFA_OFFSET -7*8
23577+ pax_force_retaddr 0, 1
23578 ret
23579 CFI_RESTORE_STATE
23580
23581diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23582index 459b58a..9570bc7 100644
23583--- a/arch/x86/lib/csum-wrappers_64.c
23584+++ b/arch/x86/lib/csum-wrappers_64.c
23585@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23586 len -= 2;
23587 }
23588 }
23589- isum = csum_partial_copy_generic((__force const void *)src,
23590+
23591+#ifdef CONFIG_PAX_MEMORY_UDEREF
23592+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23593+ src += PAX_USER_SHADOW_BASE;
23594+#endif
23595+
23596+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
23597 dst, len, isum, errp, NULL);
23598 if (unlikely(*errp))
23599 goto out_err;
23600@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23601 }
23602
23603 *errp = 0;
23604- return csum_partial_copy_generic(src, (void __force *)dst,
23605+
23606+#ifdef CONFIG_PAX_MEMORY_UDEREF
23607+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
23608+ dst += PAX_USER_SHADOW_BASE;
23609+#endif
23610+
23611+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
23612 len, isum, NULL, errp);
23613 }
23614 EXPORT_SYMBOL(csum_partial_copy_to_user);
23615diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23616index 51f1504..ddac4c1 100644
23617--- a/arch/x86/lib/getuser.S
23618+++ b/arch/x86/lib/getuser.S
23619@@ -33,15 +33,38 @@
23620 #include <asm/asm-offsets.h>
23621 #include <asm/thread_info.h>
23622 #include <asm/asm.h>
23623+#include <asm/segment.h>
23624+#include <asm/pgtable.h>
23625+#include <asm/alternative-asm.h>
23626+
23627+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23628+#define __copyuser_seg gs;
23629+#else
23630+#define __copyuser_seg
23631+#endif
23632
23633 .text
23634 ENTRY(__get_user_1)
23635 CFI_STARTPROC
23636+
23637+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23638 GET_THREAD_INFO(%_ASM_DX)
23639 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23640 jae bad_get_user
23641-1: movzb (%_ASM_AX),%edx
23642+
23643+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23644+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23645+ cmp %_ASM_DX,%_ASM_AX
23646+ jae 1234f
23647+ add %_ASM_DX,%_ASM_AX
23648+1234:
23649+#endif
23650+
23651+#endif
23652+
23653+1: __copyuser_seg movzb (%_ASM_AX),%edx
23654 xor %eax,%eax
23655+ pax_force_retaddr
23656 ret
23657 CFI_ENDPROC
23658 ENDPROC(__get_user_1)
23659@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
23660 ENTRY(__get_user_2)
23661 CFI_STARTPROC
23662 add $1,%_ASM_AX
23663+
23664+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23665 jc bad_get_user
23666 GET_THREAD_INFO(%_ASM_DX)
23667 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23668 jae bad_get_user
23669-2: movzwl -1(%_ASM_AX),%edx
23670+
23671+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23672+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23673+ cmp %_ASM_DX,%_ASM_AX
23674+ jae 1234f
23675+ add %_ASM_DX,%_ASM_AX
23676+1234:
23677+#endif
23678+
23679+#endif
23680+
23681+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23682 xor %eax,%eax
23683+ pax_force_retaddr
23684 ret
23685 CFI_ENDPROC
23686 ENDPROC(__get_user_2)
23687@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
23688 ENTRY(__get_user_4)
23689 CFI_STARTPROC
23690 add $3,%_ASM_AX
23691+
23692+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23693 jc bad_get_user
23694 GET_THREAD_INFO(%_ASM_DX)
23695 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23696 jae bad_get_user
23697-3: mov -3(%_ASM_AX),%edx
23698+
23699+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23700+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23701+ cmp %_ASM_DX,%_ASM_AX
23702+ jae 1234f
23703+ add %_ASM_DX,%_ASM_AX
23704+1234:
23705+#endif
23706+
23707+#endif
23708+
23709+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23710 xor %eax,%eax
23711+ pax_force_retaddr
23712 ret
23713 CFI_ENDPROC
23714 ENDPROC(__get_user_4)
23715@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
23716 GET_THREAD_INFO(%_ASM_DX)
23717 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23718 jae bad_get_user
23719+
23720+#ifdef CONFIG_PAX_MEMORY_UDEREF
23721+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23722+ cmp %_ASM_DX,%_ASM_AX
23723+ jae 1234f
23724+ add %_ASM_DX,%_ASM_AX
23725+1234:
23726+#endif
23727+
23728 4: movq -7(%_ASM_AX),%_ASM_DX
23729 xor %eax,%eax
23730+ pax_force_retaddr
23731 ret
23732 CFI_ENDPROC
23733 ENDPROC(__get_user_8)
23734@@ -91,6 +152,7 @@ bad_get_user:
23735 CFI_STARTPROC
23736 xor %edx,%edx
23737 mov $(-EFAULT),%_ASM_AX
23738+ pax_force_retaddr
23739 ret
23740 CFI_ENDPROC
23741 END(bad_get_user)
23742diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23743index 05a95e7..326f2fa 100644
23744--- a/arch/x86/lib/iomap_copy_64.S
23745+++ b/arch/x86/lib/iomap_copy_64.S
23746@@ -17,6 +17,7 @@
23747
23748 #include <linux/linkage.h>
23749 #include <asm/dwarf2.h>
23750+#include <asm/alternative-asm.h>
23751
23752 /*
23753 * override generic version in lib/iomap_copy.c
23754@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23755 CFI_STARTPROC
23756 movl %edx,%ecx
23757 rep movsd
23758+ pax_force_retaddr
23759 ret
23760 CFI_ENDPROC
23761 ENDPROC(__iowrite32_copy)
23762diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23763index ad5441e..610e351 100644
23764--- a/arch/x86/lib/memcpy_64.S
23765+++ b/arch/x86/lib/memcpy_64.S
23766@@ -4,6 +4,7 @@
23767
23768 #include <asm/cpufeature.h>
23769 #include <asm/dwarf2.h>
23770+#include <asm/alternative-asm.h>
23771
23772 /*
23773 * memcpy - Copy a memory block.
23774@@ -34,6 +35,7 @@ memcpy_c:
23775 rep movsq
23776 movl %edx, %ecx
23777 rep movsb
23778+ pax_force_retaddr
23779 ret
23780 CFI_ENDPROC
23781 ENDPROC(memcpy_c)
23782@@ -118,6 +120,7 @@ ENTRY(memcpy)
23783 jnz .Lloop_1
23784
23785 .Lend:
23786+ pax_force_retaddr 0, 1
23787 ret
23788 CFI_ENDPROC
23789 ENDPROC(memcpy)
23790@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
23791 * It is also a lot simpler. Use this when possible:
23792 */
23793
23794- .section .altinstr_replacement, "ax"
23795+ .section .altinstr_replacement, "a"
23796 1: .byte 0xeb /* jmp <disp8> */
23797 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
23798 2:
23799diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23800index 2c59481..7e9ba4e 100644
23801--- a/arch/x86/lib/memset_64.S
23802+++ b/arch/x86/lib/memset_64.S
23803@@ -2,6 +2,7 @@
23804
23805 #include <linux/linkage.h>
23806 #include <asm/dwarf2.h>
23807+#include <asm/alternative-asm.h>
23808
23809 /*
23810 * ISO C memset - set a memory block to a byte value.
23811@@ -28,6 +29,7 @@ memset_c:
23812 movl %r8d,%ecx
23813 rep stosb
23814 movq %r9,%rax
23815+ pax_force_retaddr
23816 ret
23817 CFI_ENDPROC
23818 ENDPROC(memset_c)
23819@@ -35,13 +37,13 @@ ENDPROC(memset_c)
23820 ENTRY(memset)
23821 ENTRY(__memset)
23822 CFI_STARTPROC
23823- movq %rdi,%r10
23824 movq %rdx,%r11
23825
23826 /* expand byte value */
23827 movzbl %sil,%ecx
23828 movabs $0x0101010101010101,%rax
23829 mul %rcx /* with rax, clobbers rdx */
23830+ movq %rdi,%rdx
23831
23832 /* align dst */
23833 movl %edi,%r9d
23834@@ -95,7 +97,8 @@ ENTRY(__memset)
23835 jnz .Lloop_1
23836
23837 .Lende:
23838- movq %r10,%rax
23839+ movq %rdx,%rax
23840+ pax_force_retaddr
23841 ret
23842
23843 CFI_RESTORE_STATE
23844@@ -118,7 +121,7 @@ ENDPROC(__memset)
23845
23846 #include <asm/cpufeature.h>
23847
23848- .section .altinstr_replacement,"ax"
23849+ .section .altinstr_replacement,"a"
23850 1: .byte 0xeb /* jmp <disp8> */
23851 .byte (memset_c - memset) - (2f - 1b) /* offset */
23852 2:
23853diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23854index c9f2d9b..e7fd2c0 100644
23855--- a/arch/x86/lib/mmx_32.c
23856+++ b/arch/x86/lib/mmx_32.c
23857@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23858 {
23859 void *p;
23860 int i;
23861+ unsigned long cr0;
23862
23863 if (unlikely(in_interrupt()))
23864 return __memcpy(to, from, len);
23865@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23866 kernel_fpu_begin();
23867
23868 __asm__ __volatile__ (
23869- "1: prefetch (%0)\n" /* This set is 28 bytes */
23870- " prefetch 64(%0)\n"
23871- " prefetch 128(%0)\n"
23872- " prefetch 192(%0)\n"
23873- " prefetch 256(%0)\n"
23874+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23875+ " prefetch 64(%1)\n"
23876+ " prefetch 128(%1)\n"
23877+ " prefetch 192(%1)\n"
23878+ " prefetch 256(%1)\n"
23879 "2: \n"
23880 ".section .fixup, \"ax\"\n"
23881- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23882+ "3: \n"
23883+
23884+#ifdef CONFIG_PAX_KERNEXEC
23885+ " movl %%cr0, %0\n"
23886+ " movl %0, %%eax\n"
23887+ " andl $0xFFFEFFFF, %%eax\n"
23888+ " movl %%eax, %%cr0\n"
23889+#endif
23890+
23891+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23892+
23893+#ifdef CONFIG_PAX_KERNEXEC
23894+ " movl %0, %%cr0\n"
23895+#endif
23896+
23897 " jmp 2b\n"
23898 ".previous\n"
23899 _ASM_EXTABLE(1b, 3b)
23900- : : "r" (from));
23901+ : "=&r" (cr0) : "r" (from) : "ax");
23902
23903 for ( ; i > 5; i--) {
23904 __asm__ __volatile__ (
23905- "1: prefetch 320(%0)\n"
23906- "2: movq (%0), %%mm0\n"
23907- " movq 8(%0), %%mm1\n"
23908- " movq 16(%0), %%mm2\n"
23909- " movq 24(%0), %%mm3\n"
23910- " movq %%mm0, (%1)\n"
23911- " movq %%mm1, 8(%1)\n"
23912- " movq %%mm2, 16(%1)\n"
23913- " movq %%mm3, 24(%1)\n"
23914- " movq 32(%0), %%mm0\n"
23915- " movq 40(%0), %%mm1\n"
23916- " movq 48(%0), %%mm2\n"
23917- " movq 56(%0), %%mm3\n"
23918- " movq %%mm0, 32(%1)\n"
23919- " movq %%mm1, 40(%1)\n"
23920- " movq %%mm2, 48(%1)\n"
23921- " movq %%mm3, 56(%1)\n"
23922+ "1: prefetch 320(%1)\n"
23923+ "2: movq (%1), %%mm0\n"
23924+ " movq 8(%1), %%mm1\n"
23925+ " movq 16(%1), %%mm2\n"
23926+ " movq 24(%1), %%mm3\n"
23927+ " movq %%mm0, (%2)\n"
23928+ " movq %%mm1, 8(%2)\n"
23929+ " movq %%mm2, 16(%2)\n"
23930+ " movq %%mm3, 24(%2)\n"
23931+ " movq 32(%1), %%mm0\n"
23932+ " movq 40(%1), %%mm1\n"
23933+ " movq 48(%1), %%mm2\n"
23934+ " movq 56(%1), %%mm3\n"
23935+ " movq %%mm0, 32(%2)\n"
23936+ " movq %%mm1, 40(%2)\n"
23937+ " movq %%mm2, 48(%2)\n"
23938+ " movq %%mm3, 56(%2)\n"
23939 ".section .fixup, \"ax\"\n"
23940- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23941+ "3:\n"
23942+
23943+#ifdef CONFIG_PAX_KERNEXEC
23944+ " movl %%cr0, %0\n"
23945+ " movl %0, %%eax\n"
23946+ " andl $0xFFFEFFFF, %%eax\n"
23947+ " movl %%eax, %%cr0\n"
23948+#endif
23949+
23950+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23951+
23952+#ifdef CONFIG_PAX_KERNEXEC
23953+ " movl %0, %%cr0\n"
23954+#endif
23955+
23956 " jmp 2b\n"
23957 ".previous\n"
23958 _ASM_EXTABLE(1b, 3b)
23959- : : "r" (from), "r" (to) : "memory");
23960+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23961
23962 from += 64;
23963 to += 64;
23964@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23965 static void fast_copy_page(void *to, void *from)
23966 {
23967 int i;
23968+ unsigned long cr0;
23969
23970 kernel_fpu_begin();
23971
23972@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23973 * but that is for later. -AV
23974 */
23975 __asm__ __volatile__(
23976- "1: prefetch (%0)\n"
23977- " prefetch 64(%0)\n"
23978- " prefetch 128(%0)\n"
23979- " prefetch 192(%0)\n"
23980- " prefetch 256(%0)\n"
23981+ "1: prefetch (%1)\n"
23982+ " prefetch 64(%1)\n"
23983+ " prefetch 128(%1)\n"
23984+ " prefetch 192(%1)\n"
23985+ " prefetch 256(%1)\n"
23986 "2: \n"
23987 ".section .fixup, \"ax\"\n"
23988- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23989+ "3: \n"
23990+
23991+#ifdef CONFIG_PAX_KERNEXEC
23992+ " movl %%cr0, %0\n"
23993+ " movl %0, %%eax\n"
23994+ " andl $0xFFFEFFFF, %%eax\n"
23995+ " movl %%eax, %%cr0\n"
23996+#endif
23997+
23998+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23999+
24000+#ifdef CONFIG_PAX_KERNEXEC
24001+ " movl %0, %%cr0\n"
24002+#endif
24003+
24004 " jmp 2b\n"
24005 ".previous\n"
24006- _ASM_EXTABLE(1b, 3b) : : "r" (from));
24007+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24008
24009 for (i = 0; i < (4096-320)/64; i++) {
24010 __asm__ __volatile__ (
24011- "1: prefetch 320(%0)\n"
24012- "2: movq (%0), %%mm0\n"
24013- " movntq %%mm0, (%1)\n"
24014- " movq 8(%0), %%mm1\n"
24015- " movntq %%mm1, 8(%1)\n"
24016- " movq 16(%0), %%mm2\n"
24017- " movntq %%mm2, 16(%1)\n"
24018- " movq 24(%0), %%mm3\n"
24019- " movntq %%mm3, 24(%1)\n"
24020- " movq 32(%0), %%mm4\n"
24021- " movntq %%mm4, 32(%1)\n"
24022- " movq 40(%0), %%mm5\n"
24023- " movntq %%mm5, 40(%1)\n"
24024- " movq 48(%0), %%mm6\n"
24025- " movntq %%mm6, 48(%1)\n"
24026- " movq 56(%0), %%mm7\n"
24027- " movntq %%mm7, 56(%1)\n"
24028+ "1: prefetch 320(%1)\n"
24029+ "2: movq (%1), %%mm0\n"
24030+ " movntq %%mm0, (%2)\n"
24031+ " movq 8(%1), %%mm1\n"
24032+ " movntq %%mm1, 8(%2)\n"
24033+ " movq 16(%1), %%mm2\n"
24034+ " movntq %%mm2, 16(%2)\n"
24035+ " movq 24(%1), %%mm3\n"
24036+ " movntq %%mm3, 24(%2)\n"
24037+ " movq 32(%1), %%mm4\n"
24038+ " movntq %%mm4, 32(%2)\n"
24039+ " movq 40(%1), %%mm5\n"
24040+ " movntq %%mm5, 40(%2)\n"
24041+ " movq 48(%1), %%mm6\n"
24042+ " movntq %%mm6, 48(%2)\n"
24043+ " movq 56(%1), %%mm7\n"
24044+ " movntq %%mm7, 56(%2)\n"
24045 ".section .fixup, \"ax\"\n"
24046- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24047+ "3:\n"
24048+
24049+#ifdef CONFIG_PAX_KERNEXEC
24050+ " movl %%cr0, %0\n"
24051+ " movl %0, %%eax\n"
24052+ " andl $0xFFFEFFFF, %%eax\n"
24053+ " movl %%eax, %%cr0\n"
24054+#endif
24055+
24056+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24057+
24058+#ifdef CONFIG_PAX_KERNEXEC
24059+ " movl %0, %%cr0\n"
24060+#endif
24061+
24062 " jmp 2b\n"
24063 ".previous\n"
24064- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
24065+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24066
24067 from += 64;
24068 to += 64;
24069@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
24070 static void fast_copy_page(void *to, void *from)
24071 {
24072 int i;
24073+ unsigned long cr0;
24074
24075 kernel_fpu_begin();
24076
24077 __asm__ __volatile__ (
24078- "1: prefetch (%0)\n"
24079- " prefetch 64(%0)\n"
24080- " prefetch 128(%0)\n"
24081- " prefetch 192(%0)\n"
24082- " prefetch 256(%0)\n"
24083+ "1: prefetch (%1)\n"
24084+ " prefetch 64(%1)\n"
24085+ " prefetch 128(%1)\n"
24086+ " prefetch 192(%1)\n"
24087+ " prefetch 256(%1)\n"
24088 "2: \n"
24089 ".section .fixup, \"ax\"\n"
24090- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24091+ "3: \n"
24092+
24093+#ifdef CONFIG_PAX_KERNEXEC
24094+ " movl %%cr0, %0\n"
24095+ " movl %0, %%eax\n"
24096+ " andl $0xFFFEFFFF, %%eax\n"
24097+ " movl %%eax, %%cr0\n"
24098+#endif
24099+
24100+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24101+
24102+#ifdef CONFIG_PAX_KERNEXEC
24103+ " movl %0, %%cr0\n"
24104+#endif
24105+
24106 " jmp 2b\n"
24107 ".previous\n"
24108- _ASM_EXTABLE(1b, 3b) : : "r" (from));
24109+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24110
24111 for (i = 0; i < 4096/64; i++) {
24112 __asm__ __volatile__ (
24113- "1: prefetch 320(%0)\n"
24114- "2: movq (%0), %%mm0\n"
24115- " movq 8(%0), %%mm1\n"
24116- " movq 16(%0), %%mm2\n"
24117- " movq 24(%0), %%mm3\n"
24118- " movq %%mm0, (%1)\n"
24119- " movq %%mm1, 8(%1)\n"
24120- " movq %%mm2, 16(%1)\n"
24121- " movq %%mm3, 24(%1)\n"
24122- " movq 32(%0), %%mm0\n"
24123- " movq 40(%0), %%mm1\n"
24124- " movq 48(%0), %%mm2\n"
24125- " movq 56(%0), %%mm3\n"
24126- " movq %%mm0, 32(%1)\n"
24127- " movq %%mm1, 40(%1)\n"
24128- " movq %%mm2, 48(%1)\n"
24129- " movq %%mm3, 56(%1)\n"
24130+ "1: prefetch 320(%1)\n"
24131+ "2: movq (%1), %%mm0\n"
24132+ " movq 8(%1), %%mm1\n"
24133+ " movq 16(%1), %%mm2\n"
24134+ " movq 24(%1), %%mm3\n"
24135+ " movq %%mm0, (%2)\n"
24136+ " movq %%mm1, 8(%2)\n"
24137+ " movq %%mm2, 16(%2)\n"
24138+ " movq %%mm3, 24(%2)\n"
24139+ " movq 32(%1), %%mm0\n"
24140+ " movq 40(%1), %%mm1\n"
24141+ " movq 48(%1), %%mm2\n"
24142+ " movq 56(%1), %%mm3\n"
24143+ " movq %%mm0, 32(%2)\n"
24144+ " movq %%mm1, 40(%2)\n"
24145+ " movq %%mm2, 48(%2)\n"
24146+ " movq %%mm3, 56(%2)\n"
24147 ".section .fixup, \"ax\"\n"
24148- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24149+ "3:\n"
24150+
24151+#ifdef CONFIG_PAX_KERNEXEC
24152+ " movl %%cr0, %0\n"
24153+ " movl %0, %%eax\n"
24154+ " andl $0xFFFEFFFF, %%eax\n"
24155+ " movl %%eax, %%cr0\n"
24156+#endif
24157+
24158+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24159+
24160+#ifdef CONFIG_PAX_KERNEXEC
24161+ " movl %0, %%cr0\n"
24162+#endif
24163+
24164 " jmp 2b\n"
24165 ".previous\n"
24166 _ASM_EXTABLE(1b, 3b)
24167- : : "r" (from), "r" (to) : "memory");
24168+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24169
24170 from += 64;
24171 to += 64;
24172diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
24173index 69fa106..adda88b 100644
24174--- a/arch/x86/lib/msr-reg.S
24175+++ b/arch/x86/lib/msr-reg.S
24176@@ -3,6 +3,7 @@
24177 #include <asm/dwarf2.h>
24178 #include <asm/asm.h>
24179 #include <asm/msr.h>
24180+#include <asm/alternative-asm.h>
24181
24182 #ifdef CONFIG_X86_64
24183 /*
24184@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
24185 CFI_STARTPROC
24186 pushq_cfi %rbx
24187 pushq_cfi %rbp
24188- movq %rdi, %r10 /* Save pointer */
24189+ movq %rdi, %r9 /* Save pointer */
24190 xorl %r11d, %r11d /* Return value */
24191 movl (%rdi), %eax
24192 movl 4(%rdi), %ecx
24193@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
24194 movl 28(%rdi), %edi
24195 CFI_REMEMBER_STATE
24196 1: \op
24197-2: movl %eax, (%r10)
24198+2: movl %eax, (%r9)
24199 movl %r11d, %eax /* Return value */
24200- movl %ecx, 4(%r10)
24201- movl %edx, 8(%r10)
24202- movl %ebx, 12(%r10)
24203- movl %ebp, 20(%r10)
24204- movl %esi, 24(%r10)
24205- movl %edi, 28(%r10)
24206+ movl %ecx, 4(%r9)
24207+ movl %edx, 8(%r9)
24208+ movl %ebx, 12(%r9)
24209+ movl %ebp, 20(%r9)
24210+ movl %esi, 24(%r9)
24211+ movl %edi, 28(%r9)
24212 popq_cfi %rbp
24213 popq_cfi %rbx
24214+ pax_force_retaddr
24215 ret
24216 3:
24217 CFI_RESTORE_STATE
24218diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
24219index 36b0d15..d381858 100644
24220--- a/arch/x86/lib/putuser.S
24221+++ b/arch/x86/lib/putuser.S
24222@@ -15,7 +15,9 @@
24223 #include <asm/thread_info.h>
24224 #include <asm/errno.h>
24225 #include <asm/asm.h>
24226-
24227+#include <asm/segment.h>
24228+#include <asm/pgtable.h>
24229+#include <asm/alternative-asm.h>
24230
24231 /*
24232 * __put_user_X
24233@@ -29,52 +31,119 @@
24234 * as they get called from within inline assembly.
24235 */
24236
24237-#define ENTER CFI_STARTPROC ; \
24238- GET_THREAD_INFO(%_ASM_BX)
24239-#define EXIT ret ; \
24240+#define ENTER CFI_STARTPROC
24241+#define EXIT pax_force_retaddr; ret ; \
24242 CFI_ENDPROC
24243
24244+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24245+#define _DEST %_ASM_CX,%_ASM_BX
24246+#else
24247+#define _DEST %_ASM_CX
24248+#endif
24249+
24250+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24251+#define __copyuser_seg gs;
24252+#else
24253+#define __copyuser_seg
24254+#endif
24255+
24256 .text
24257 ENTRY(__put_user_1)
24258 ENTER
24259+
24260+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24261+ GET_THREAD_INFO(%_ASM_BX)
24262 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24263 jae bad_put_user
24264-1: movb %al,(%_ASM_CX)
24265+
24266+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24267+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24268+ cmp %_ASM_BX,%_ASM_CX
24269+ jb 1234f
24270+ xor %ebx,%ebx
24271+1234:
24272+#endif
24273+
24274+#endif
24275+
24276+1: __copyuser_seg movb %al,(_DEST)
24277 xor %eax,%eax
24278 EXIT
24279 ENDPROC(__put_user_1)
24280
24281 ENTRY(__put_user_2)
24282 ENTER
24283+
24284+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24285+ GET_THREAD_INFO(%_ASM_BX)
24286 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24287 sub $1,%_ASM_BX
24288 cmp %_ASM_BX,%_ASM_CX
24289 jae bad_put_user
24290-2: movw %ax,(%_ASM_CX)
24291+
24292+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24293+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24294+ cmp %_ASM_BX,%_ASM_CX
24295+ jb 1234f
24296+ xor %ebx,%ebx
24297+1234:
24298+#endif
24299+
24300+#endif
24301+
24302+2: __copyuser_seg movw %ax,(_DEST)
24303 xor %eax,%eax
24304 EXIT
24305 ENDPROC(__put_user_2)
24306
24307 ENTRY(__put_user_4)
24308 ENTER
24309+
24310+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24311+ GET_THREAD_INFO(%_ASM_BX)
24312 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24313 sub $3,%_ASM_BX
24314 cmp %_ASM_BX,%_ASM_CX
24315 jae bad_put_user
24316-3: movl %eax,(%_ASM_CX)
24317+
24318+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24319+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24320+ cmp %_ASM_BX,%_ASM_CX
24321+ jb 1234f
24322+ xor %ebx,%ebx
24323+1234:
24324+#endif
24325+
24326+#endif
24327+
24328+3: __copyuser_seg movl %eax,(_DEST)
24329 xor %eax,%eax
24330 EXIT
24331 ENDPROC(__put_user_4)
24332
24333 ENTRY(__put_user_8)
24334 ENTER
24335+
24336+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24337+ GET_THREAD_INFO(%_ASM_BX)
24338 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24339 sub $7,%_ASM_BX
24340 cmp %_ASM_BX,%_ASM_CX
24341 jae bad_put_user
24342-4: mov %_ASM_AX,(%_ASM_CX)
24343+
24344+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24345+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24346+ cmp %_ASM_BX,%_ASM_CX
24347+ jb 1234f
24348+ xor %ebx,%ebx
24349+1234:
24350+#endif
24351+
24352+#endif
24353+
24354+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24355 #ifdef CONFIG_X86_32
24356-5: movl %edx,4(%_ASM_CX)
24357+5: __copyuser_seg movl %edx,4(_DEST)
24358 #endif
24359 xor %eax,%eax
24360 EXIT
24361diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
24362index 05ea55f..6345b9a 100644
24363--- a/arch/x86/lib/rwlock_64.S
24364+++ b/arch/x86/lib/rwlock_64.S
24365@@ -2,6 +2,7 @@
24366
24367 #include <linux/linkage.h>
24368 #include <asm/rwlock.h>
24369+#include <asm/asm.h>
24370 #include <asm/alternative-asm.h>
24371 #include <asm/dwarf2.h>
24372
24373@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
24374 CFI_STARTPROC
24375 LOCK_PREFIX
24376 addl $RW_LOCK_BIAS,(%rdi)
24377+
24378+#ifdef CONFIG_PAX_REFCOUNT
24379+ jno 1234f
24380+ LOCK_PREFIX
24381+ subl $RW_LOCK_BIAS,(%rdi)
24382+ int $4
24383+1234:
24384+ _ASM_EXTABLE(1234b, 1234b)
24385+#endif
24386+
24387 1: rep
24388 nop
24389 cmpl $RW_LOCK_BIAS,(%rdi)
24390 jne 1b
24391 LOCK_PREFIX
24392 subl $RW_LOCK_BIAS,(%rdi)
24393+
24394+#ifdef CONFIG_PAX_REFCOUNT
24395+ jno 1234f
24396+ LOCK_PREFIX
24397+ addl $RW_LOCK_BIAS,(%rdi)
24398+ int $4
24399+1234:
24400+ _ASM_EXTABLE(1234b, 1234b)
24401+#endif
24402+
24403 jnz __write_lock_failed
24404+ pax_force_retaddr
24405 ret
24406 CFI_ENDPROC
24407 END(__write_lock_failed)
24408@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
24409 CFI_STARTPROC
24410 LOCK_PREFIX
24411 incl (%rdi)
24412+
24413+#ifdef CONFIG_PAX_REFCOUNT
24414+ jno 1234f
24415+ LOCK_PREFIX
24416+ decl (%rdi)
24417+ int $4
24418+1234:
24419+ _ASM_EXTABLE(1234b, 1234b)
24420+#endif
24421+
24422 1: rep
24423 nop
24424 cmpl $1,(%rdi)
24425 js 1b
24426 LOCK_PREFIX
24427 decl (%rdi)
24428+
24429+#ifdef CONFIG_PAX_REFCOUNT
24430+ jno 1234f
24431+ LOCK_PREFIX
24432+ incl (%rdi)
24433+ int $4
24434+1234:
24435+ _ASM_EXTABLE(1234b, 1234b)
24436+#endif
24437+
24438 js __read_lock_failed
24439+ pax_force_retaddr
24440 ret
24441 CFI_ENDPROC
24442 END(__read_lock_failed)
24443diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
24444index 15acecf..f768b10 100644
24445--- a/arch/x86/lib/rwsem_64.S
24446+++ b/arch/x86/lib/rwsem_64.S
24447@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
24448 call rwsem_down_read_failed
24449 popq %rdx
24450 restore_common_regs
24451+ pax_force_retaddr
24452 ret
24453 ENDPROC(call_rwsem_down_read_failed)
24454
24455@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
24456 movq %rax,%rdi
24457 call rwsem_down_write_failed
24458 restore_common_regs
24459+ pax_force_retaddr
24460 ret
24461 ENDPROC(call_rwsem_down_write_failed)
24462
24463@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
24464 movq %rax,%rdi
24465 call rwsem_wake
24466 restore_common_regs
24467-1: ret
24468+1: pax_force_retaddr
24469+ ret
24470 ENDPROC(call_rwsem_wake)
24471
24472 /* Fix up special calling conventions */
24473@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
24474 call rwsem_downgrade_wake
24475 popq %rdx
24476 restore_common_regs
24477+ pax_force_retaddr
24478 ret
24479 ENDPROC(call_rwsem_downgrade_wake)
24480diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24481index bf9a7d5..fb06ab5 100644
24482--- a/arch/x86/lib/thunk_64.S
24483+++ b/arch/x86/lib/thunk_64.S
24484@@ -10,7 +10,8 @@
24485 #include <asm/dwarf2.h>
24486 #include <asm/calling.h>
24487 #include <asm/rwlock.h>
24488-
24489+ #include <asm/alternative-asm.h>
24490+
24491 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24492 .macro thunk name,func
24493 .globl \name
24494@@ -70,6 +71,7 @@
24495 SAVE_ARGS
24496 restore:
24497 RESTORE_ARGS
24498+ pax_force_retaddr
24499 ret
24500 CFI_ENDPROC
24501
24502@@ -77,5 +79,6 @@ restore:
24503 SAVE_ARGS
24504 restore_norax:
24505 RESTORE_ARGS 1
24506+ pax_force_retaddr
24507 ret
24508 CFI_ENDPROC
24509diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24510index 1f118d4..8e0ead9 100644
24511--- a/arch/x86/lib/usercopy_32.c
24512+++ b/arch/x86/lib/usercopy_32.c
24513@@ -43,7 +43,7 @@ do { \
24514 __asm__ __volatile__( \
24515 " testl %1,%1\n" \
24516 " jz 2f\n" \
24517- "0: lodsb\n" \
24518+ "0: "__copyuser_seg"lodsb\n" \
24519 " stosb\n" \
24520 " testb %%al,%%al\n" \
24521 " jz 1f\n" \
24522@@ -83,7 +83,7 @@ do { \
24523 * and returns @count.
24524 */
24525 long
24526-__strncpy_from_user(char *dst, const char __user *src, long count)
24527+__strncpy_from_user(char *dst, const char __user *src, unsigned long count)
24528 {
24529 long res;
24530 __do_strncpy_from_user(dst, src, count, res);
24531@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
24532 * and returns @count.
24533 */
24534 long
24535-strncpy_from_user(char *dst, const char __user *src, long count)
24536+strncpy_from_user(char *dst, const char __user *src, unsigned long count)
24537 {
24538 long res = -EFAULT;
24539 if (access_ok(VERIFY_READ, src, 1))
24540@@ -128,10 +128,12 @@ do { \
24541 int __d0; \
24542 might_fault(); \
24543 __asm__ __volatile__( \
24544+ __COPYUSER_SET_ES \
24545 "0: rep; stosl\n" \
24546 " movl %2,%0\n" \
24547 "1: rep; stosb\n" \
24548 "2:\n" \
24549+ __COPYUSER_RESTORE_ES \
24550 ".section .fixup,\"ax\"\n" \
24551 "3: lea 0(%2,%0,4),%0\n" \
24552 " jmp 2b\n" \
24553@@ -192,7 +194,7 @@ EXPORT_SYMBOL(__clear_user);
24554 * On exception, returns 0.
24555 * If the string is too long, returns a value greater than @n.
24556 */
24557-long strnlen_user(const char __user *s, long n)
24558+long strnlen_user(const char __user *s, unsigned long n)
24559 {
24560 unsigned long mask = -__addr_ok(s);
24561 unsigned long res, tmp;
24562@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
24563 might_fault();
24564
24565 __asm__ __volatile__(
24566+ __COPYUSER_SET_ES
24567 " testl %0, %0\n"
24568 " jz 3f\n"
24569 " andl %0,%%ecx\n"
24570@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
24571 " subl %%ecx,%0\n"
24572 " addl %0,%%eax\n"
24573 "1:\n"
24574+ __COPYUSER_RESTORE_ES
24575 ".section .fixup,\"ax\"\n"
24576 "2: xorl %%eax,%%eax\n"
24577 " jmp 1b\n"
24578@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
24579
24580 #ifdef CONFIG_X86_INTEL_USERCOPY
24581 static unsigned long
24582-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24583+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24584 {
24585 int d0, d1;
24586 __asm__ __volatile__(
24587@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24588 " .align 2,0x90\n"
24589 "3: movl 0(%4), %%eax\n"
24590 "4: movl 4(%4), %%edx\n"
24591- "5: movl %%eax, 0(%3)\n"
24592- "6: movl %%edx, 4(%3)\n"
24593+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24594+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24595 "7: movl 8(%4), %%eax\n"
24596 "8: movl 12(%4),%%edx\n"
24597- "9: movl %%eax, 8(%3)\n"
24598- "10: movl %%edx, 12(%3)\n"
24599+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24600+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24601 "11: movl 16(%4), %%eax\n"
24602 "12: movl 20(%4), %%edx\n"
24603- "13: movl %%eax, 16(%3)\n"
24604- "14: movl %%edx, 20(%3)\n"
24605+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24606+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24607 "15: movl 24(%4), %%eax\n"
24608 "16: movl 28(%4), %%edx\n"
24609- "17: movl %%eax, 24(%3)\n"
24610- "18: movl %%edx, 28(%3)\n"
24611+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24612+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24613 "19: movl 32(%4), %%eax\n"
24614 "20: movl 36(%4), %%edx\n"
24615- "21: movl %%eax, 32(%3)\n"
24616- "22: movl %%edx, 36(%3)\n"
24617+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24618+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24619 "23: movl 40(%4), %%eax\n"
24620 "24: movl 44(%4), %%edx\n"
24621- "25: movl %%eax, 40(%3)\n"
24622- "26: movl %%edx, 44(%3)\n"
24623+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24624+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24625 "27: movl 48(%4), %%eax\n"
24626 "28: movl 52(%4), %%edx\n"
24627- "29: movl %%eax, 48(%3)\n"
24628- "30: movl %%edx, 52(%3)\n"
24629+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24630+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24631 "31: movl 56(%4), %%eax\n"
24632 "32: movl 60(%4), %%edx\n"
24633- "33: movl %%eax, 56(%3)\n"
24634- "34: movl %%edx, 60(%3)\n"
24635+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24636+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24637 " addl $-64, %0\n"
24638 " addl $64, %4\n"
24639 " addl $64, %3\n"
24640@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24641 " shrl $2, %0\n"
24642 " andl $3, %%eax\n"
24643 " cld\n"
24644+ __COPYUSER_SET_ES
24645 "99: rep; movsl\n"
24646 "36: movl %%eax, %0\n"
24647 "37: rep; movsb\n"
24648 "100:\n"
24649+ __COPYUSER_RESTORE_ES
24650 ".section .fixup,\"ax\"\n"
24651 "101: lea 0(%%eax,%0,4),%0\n"
24652 " jmp 100b\n"
24653@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24654 }
24655
24656 static unsigned long
24657+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24658+{
24659+ int d0, d1;
24660+ __asm__ __volatile__(
24661+ " .align 2,0x90\n"
24662+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24663+ " cmpl $67, %0\n"
24664+ " jbe 3f\n"
24665+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24666+ " .align 2,0x90\n"
24667+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24668+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24669+ "5: movl %%eax, 0(%3)\n"
24670+ "6: movl %%edx, 4(%3)\n"
24671+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24672+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24673+ "9: movl %%eax, 8(%3)\n"
24674+ "10: movl %%edx, 12(%3)\n"
24675+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24676+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24677+ "13: movl %%eax, 16(%3)\n"
24678+ "14: movl %%edx, 20(%3)\n"
24679+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24680+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24681+ "17: movl %%eax, 24(%3)\n"
24682+ "18: movl %%edx, 28(%3)\n"
24683+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24684+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24685+ "21: movl %%eax, 32(%3)\n"
24686+ "22: movl %%edx, 36(%3)\n"
24687+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24688+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24689+ "25: movl %%eax, 40(%3)\n"
24690+ "26: movl %%edx, 44(%3)\n"
24691+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24692+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24693+ "29: movl %%eax, 48(%3)\n"
24694+ "30: movl %%edx, 52(%3)\n"
24695+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24696+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24697+ "33: movl %%eax, 56(%3)\n"
24698+ "34: movl %%edx, 60(%3)\n"
24699+ " addl $-64, %0\n"
24700+ " addl $64, %4\n"
24701+ " addl $64, %3\n"
24702+ " cmpl $63, %0\n"
24703+ " ja 1b\n"
24704+ "35: movl %0, %%eax\n"
24705+ " shrl $2, %0\n"
24706+ " andl $3, %%eax\n"
24707+ " cld\n"
24708+ "99: rep; "__copyuser_seg" movsl\n"
24709+ "36: movl %%eax, %0\n"
24710+ "37: rep; "__copyuser_seg" movsb\n"
24711+ "100:\n"
24712+ ".section .fixup,\"ax\"\n"
24713+ "101: lea 0(%%eax,%0,4),%0\n"
24714+ " jmp 100b\n"
24715+ ".previous\n"
24716+ ".section __ex_table,\"a\"\n"
24717+ " .align 4\n"
24718+ " .long 1b,100b\n"
24719+ " .long 2b,100b\n"
24720+ " .long 3b,100b\n"
24721+ " .long 4b,100b\n"
24722+ " .long 5b,100b\n"
24723+ " .long 6b,100b\n"
24724+ " .long 7b,100b\n"
24725+ " .long 8b,100b\n"
24726+ " .long 9b,100b\n"
24727+ " .long 10b,100b\n"
24728+ " .long 11b,100b\n"
24729+ " .long 12b,100b\n"
24730+ " .long 13b,100b\n"
24731+ " .long 14b,100b\n"
24732+ " .long 15b,100b\n"
24733+ " .long 16b,100b\n"
24734+ " .long 17b,100b\n"
24735+ " .long 18b,100b\n"
24736+ " .long 19b,100b\n"
24737+ " .long 20b,100b\n"
24738+ " .long 21b,100b\n"
24739+ " .long 22b,100b\n"
24740+ " .long 23b,100b\n"
24741+ " .long 24b,100b\n"
24742+ " .long 25b,100b\n"
24743+ " .long 26b,100b\n"
24744+ " .long 27b,100b\n"
24745+ " .long 28b,100b\n"
24746+ " .long 29b,100b\n"
24747+ " .long 30b,100b\n"
24748+ " .long 31b,100b\n"
24749+ " .long 32b,100b\n"
24750+ " .long 33b,100b\n"
24751+ " .long 34b,100b\n"
24752+ " .long 35b,100b\n"
24753+ " .long 36b,100b\n"
24754+ " .long 37b,100b\n"
24755+ " .long 99b,101b\n"
24756+ ".previous"
24757+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24758+ : "1"(to), "2"(from), "0"(size)
24759+ : "eax", "edx", "memory");
24760+ return size;
24761+}
24762+
24763+static unsigned long
24764+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
24765+static unsigned long
24766 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24767 {
24768 int d0, d1;
24769 __asm__ __volatile__(
24770 " .align 2,0x90\n"
24771- "0: movl 32(%4), %%eax\n"
24772+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24773 " cmpl $67, %0\n"
24774 " jbe 2f\n"
24775- "1: movl 64(%4), %%eax\n"
24776+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24777 " .align 2,0x90\n"
24778- "2: movl 0(%4), %%eax\n"
24779- "21: movl 4(%4), %%edx\n"
24780+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24781+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24782 " movl %%eax, 0(%3)\n"
24783 " movl %%edx, 4(%3)\n"
24784- "3: movl 8(%4), %%eax\n"
24785- "31: movl 12(%4),%%edx\n"
24786+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24787+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24788 " movl %%eax, 8(%3)\n"
24789 " movl %%edx, 12(%3)\n"
24790- "4: movl 16(%4), %%eax\n"
24791- "41: movl 20(%4), %%edx\n"
24792+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24793+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24794 " movl %%eax, 16(%3)\n"
24795 " movl %%edx, 20(%3)\n"
24796- "10: movl 24(%4), %%eax\n"
24797- "51: movl 28(%4), %%edx\n"
24798+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24799+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24800 " movl %%eax, 24(%3)\n"
24801 " movl %%edx, 28(%3)\n"
24802- "11: movl 32(%4), %%eax\n"
24803- "61: movl 36(%4), %%edx\n"
24804+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24805+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24806 " movl %%eax, 32(%3)\n"
24807 " movl %%edx, 36(%3)\n"
24808- "12: movl 40(%4), %%eax\n"
24809- "71: movl 44(%4), %%edx\n"
24810+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24811+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24812 " movl %%eax, 40(%3)\n"
24813 " movl %%edx, 44(%3)\n"
24814- "13: movl 48(%4), %%eax\n"
24815- "81: movl 52(%4), %%edx\n"
24816+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24817+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24818 " movl %%eax, 48(%3)\n"
24819 " movl %%edx, 52(%3)\n"
24820- "14: movl 56(%4), %%eax\n"
24821- "91: movl 60(%4), %%edx\n"
24822+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24823+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24824 " movl %%eax, 56(%3)\n"
24825 " movl %%edx, 60(%3)\n"
24826 " addl $-64, %0\n"
24827@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24828 " shrl $2, %0\n"
24829 " andl $3, %%eax\n"
24830 " cld\n"
24831- "6: rep; movsl\n"
24832+ "6: rep; "__copyuser_seg" movsl\n"
24833 " movl %%eax,%0\n"
24834- "7: rep; movsb\n"
24835+ "7: rep; "__copyuser_seg" movsb\n"
24836 "8:\n"
24837 ".section .fixup,\"ax\"\n"
24838 "9: lea 0(%%eax,%0,4),%0\n"
24839@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24840 */
24841
24842 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24843+ const void __user *from, unsigned long size) __size_overflow(3);
24844+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24845 const void __user *from, unsigned long size)
24846 {
24847 int d0, d1;
24848
24849 __asm__ __volatile__(
24850 " .align 2,0x90\n"
24851- "0: movl 32(%4), %%eax\n"
24852+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24853 " cmpl $67, %0\n"
24854 " jbe 2f\n"
24855- "1: movl 64(%4), %%eax\n"
24856+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24857 " .align 2,0x90\n"
24858- "2: movl 0(%4), %%eax\n"
24859- "21: movl 4(%4), %%edx\n"
24860+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24861+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24862 " movnti %%eax, 0(%3)\n"
24863 " movnti %%edx, 4(%3)\n"
24864- "3: movl 8(%4), %%eax\n"
24865- "31: movl 12(%4),%%edx\n"
24866+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24867+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24868 " movnti %%eax, 8(%3)\n"
24869 " movnti %%edx, 12(%3)\n"
24870- "4: movl 16(%4), %%eax\n"
24871- "41: movl 20(%4), %%edx\n"
24872+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24873+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24874 " movnti %%eax, 16(%3)\n"
24875 " movnti %%edx, 20(%3)\n"
24876- "10: movl 24(%4), %%eax\n"
24877- "51: movl 28(%4), %%edx\n"
24878+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24879+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24880 " movnti %%eax, 24(%3)\n"
24881 " movnti %%edx, 28(%3)\n"
24882- "11: movl 32(%4), %%eax\n"
24883- "61: movl 36(%4), %%edx\n"
24884+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24885+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24886 " movnti %%eax, 32(%3)\n"
24887 " movnti %%edx, 36(%3)\n"
24888- "12: movl 40(%4), %%eax\n"
24889- "71: movl 44(%4), %%edx\n"
24890+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24891+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24892 " movnti %%eax, 40(%3)\n"
24893 " movnti %%edx, 44(%3)\n"
24894- "13: movl 48(%4), %%eax\n"
24895- "81: movl 52(%4), %%edx\n"
24896+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24897+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24898 " movnti %%eax, 48(%3)\n"
24899 " movnti %%edx, 52(%3)\n"
24900- "14: movl 56(%4), %%eax\n"
24901- "91: movl 60(%4), %%edx\n"
24902+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24903+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24904 " movnti %%eax, 56(%3)\n"
24905 " movnti %%edx, 60(%3)\n"
24906 " addl $-64, %0\n"
24907@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24908 " shrl $2, %0\n"
24909 " andl $3, %%eax\n"
24910 " cld\n"
24911- "6: rep; movsl\n"
24912+ "6: rep; "__copyuser_seg" movsl\n"
24913 " movl %%eax,%0\n"
24914- "7: rep; movsb\n"
24915+ "7: rep; "__copyuser_seg" movsb\n"
24916 "8:\n"
24917 ".section .fixup,\"ax\"\n"
24918 "9: lea 0(%%eax,%0,4),%0\n"
24919@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24920 }
24921
24922 static unsigned long __copy_user_intel_nocache(void *to,
24923+ const void __user *from, unsigned long size) __size_overflow(3);
24924+static unsigned long __copy_user_intel_nocache(void *to,
24925 const void __user *from, unsigned long size)
24926 {
24927 int d0, d1;
24928
24929 __asm__ __volatile__(
24930 " .align 2,0x90\n"
24931- "0: movl 32(%4), %%eax\n"
24932+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24933 " cmpl $67, %0\n"
24934 " jbe 2f\n"
24935- "1: movl 64(%4), %%eax\n"
24936+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24937 " .align 2,0x90\n"
24938- "2: movl 0(%4), %%eax\n"
24939- "21: movl 4(%4), %%edx\n"
24940+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24941+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24942 " movnti %%eax, 0(%3)\n"
24943 " movnti %%edx, 4(%3)\n"
24944- "3: movl 8(%4), %%eax\n"
24945- "31: movl 12(%4),%%edx\n"
24946+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24947+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24948 " movnti %%eax, 8(%3)\n"
24949 " movnti %%edx, 12(%3)\n"
24950- "4: movl 16(%4), %%eax\n"
24951- "41: movl 20(%4), %%edx\n"
24952+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24953+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24954 " movnti %%eax, 16(%3)\n"
24955 " movnti %%edx, 20(%3)\n"
24956- "10: movl 24(%4), %%eax\n"
24957- "51: movl 28(%4), %%edx\n"
24958+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24959+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24960 " movnti %%eax, 24(%3)\n"
24961 " movnti %%edx, 28(%3)\n"
24962- "11: movl 32(%4), %%eax\n"
24963- "61: movl 36(%4), %%edx\n"
24964+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24965+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24966 " movnti %%eax, 32(%3)\n"
24967 " movnti %%edx, 36(%3)\n"
24968- "12: movl 40(%4), %%eax\n"
24969- "71: movl 44(%4), %%edx\n"
24970+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24971+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24972 " movnti %%eax, 40(%3)\n"
24973 " movnti %%edx, 44(%3)\n"
24974- "13: movl 48(%4), %%eax\n"
24975- "81: movl 52(%4), %%edx\n"
24976+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24977+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24978 " movnti %%eax, 48(%3)\n"
24979 " movnti %%edx, 52(%3)\n"
24980- "14: movl 56(%4), %%eax\n"
24981- "91: movl 60(%4), %%edx\n"
24982+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24983+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24984 " movnti %%eax, 56(%3)\n"
24985 " movnti %%edx, 60(%3)\n"
24986 " addl $-64, %0\n"
24987@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24988 " shrl $2, %0\n"
24989 " andl $3, %%eax\n"
24990 " cld\n"
24991- "6: rep; movsl\n"
24992+ "6: rep; "__copyuser_seg" movsl\n"
24993 " movl %%eax,%0\n"
24994- "7: rep; movsb\n"
24995+ "7: rep; "__copyuser_seg" movsb\n"
24996 "8:\n"
24997 ".section .fixup,\"ax\"\n"
24998 "9: lea 0(%%eax,%0,4),%0\n"
24999@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
25000 */
25001 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
25002 unsigned long size);
25003-unsigned long __copy_user_intel(void __user *to, const void *from,
25004+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
25005+ unsigned long size);
25006+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
25007 unsigned long size);
25008 unsigned long __copy_user_zeroing_intel_nocache(void *to,
25009 const void __user *from, unsigned long size);
25010 #endif /* CONFIG_X86_INTEL_USERCOPY */
25011
25012 /* Generic arbitrary sized copy. */
25013-#define __copy_user(to, from, size) \
25014+#define __copy_user(to, from, size, prefix, set, restore) \
25015 do { \
25016 int __d0, __d1, __d2; \
25017 __asm__ __volatile__( \
25018+ set \
25019 " cmp $7,%0\n" \
25020 " jbe 1f\n" \
25021 " movl %1,%0\n" \
25022 " negl %0\n" \
25023 " andl $7,%0\n" \
25024 " subl %0,%3\n" \
25025- "4: rep; movsb\n" \
25026+ "4: rep; "prefix"movsb\n" \
25027 " movl %3,%0\n" \
25028 " shrl $2,%0\n" \
25029 " andl $3,%3\n" \
25030 " .align 2,0x90\n" \
25031- "0: rep; movsl\n" \
25032+ "0: rep; "prefix"movsl\n" \
25033 " movl %3,%0\n" \
25034- "1: rep; movsb\n" \
25035+ "1: rep; "prefix"movsb\n" \
25036 "2:\n" \
25037+ restore \
25038 ".section .fixup,\"ax\"\n" \
25039 "5: addl %3,%0\n" \
25040 " jmp 2b\n" \
25041@@ -682,14 +805,14 @@ do { \
25042 " negl %0\n" \
25043 " andl $7,%0\n" \
25044 " subl %0,%3\n" \
25045- "4: rep; movsb\n" \
25046+ "4: rep; "__copyuser_seg"movsb\n" \
25047 " movl %3,%0\n" \
25048 " shrl $2,%0\n" \
25049 " andl $3,%3\n" \
25050 " .align 2,0x90\n" \
25051- "0: rep; movsl\n" \
25052+ "0: rep; "__copyuser_seg"movsl\n" \
25053 " movl %3,%0\n" \
25054- "1: rep; movsb\n" \
25055+ "1: rep; "__copyuser_seg"movsb\n" \
25056 "2:\n" \
25057 ".section .fixup,\"ax\"\n" \
25058 "5: addl %3,%0\n" \
25059@@ -775,9 +898,9 @@ survive:
25060 }
25061 #endif
25062 if (movsl_is_ok(to, from, n))
25063- __copy_user(to, from, n);
25064+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
25065 else
25066- n = __copy_user_intel(to, from, n);
25067+ n = __generic_copy_to_user_intel(to, from, n);
25068 return n;
25069 }
25070 EXPORT_SYMBOL(__copy_to_user_ll);
25071@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
25072 unsigned long n)
25073 {
25074 if (movsl_is_ok(to, from, n))
25075- __copy_user(to, from, n);
25076+ __copy_user(to, from, n, __copyuser_seg, "", "");
25077 else
25078- n = __copy_user_intel((void __user *)to,
25079- (const void *)from, n);
25080+ n = __generic_copy_from_user_intel(to, from, n);
25081 return n;
25082 }
25083 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
25084@@ -827,59 +949,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
25085 if (n > 64 && cpu_has_xmm2)
25086 n = __copy_user_intel_nocache(to, from, n);
25087 else
25088- __copy_user(to, from, n);
25089+ __copy_user(to, from, n, __copyuser_seg, "", "");
25090 #else
25091- __copy_user(to, from, n);
25092+ __copy_user(to, from, n, __copyuser_seg, "", "");
25093 #endif
25094 return n;
25095 }
25096 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
25097
25098-/**
25099- * copy_to_user: - Copy a block of data into user space.
25100- * @to: Destination address, in user space.
25101- * @from: Source address, in kernel space.
25102- * @n: Number of bytes to copy.
25103- *
25104- * Context: User context only. This function may sleep.
25105- *
25106- * Copy data from kernel space to user space.
25107- *
25108- * Returns number of bytes that could not be copied.
25109- * On success, this will be zero.
25110- */
25111-unsigned long
25112-copy_to_user(void __user *to, const void *from, unsigned long n)
25113+#ifdef CONFIG_PAX_MEMORY_UDEREF
25114+void __set_fs(mm_segment_t x)
25115 {
25116- if (access_ok(VERIFY_WRITE, to, n))
25117- n = __copy_to_user(to, from, n);
25118- return n;
25119+ switch (x.seg) {
25120+ case 0:
25121+ loadsegment(gs, 0);
25122+ break;
25123+ case TASK_SIZE_MAX:
25124+ loadsegment(gs, __USER_DS);
25125+ break;
25126+ case -1UL:
25127+ loadsegment(gs, __KERNEL_DS);
25128+ break;
25129+ default:
25130+ BUG();
25131+ }
25132+ return;
25133 }
25134-EXPORT_SYMBOL(copy_to_user);
25135+EXPORT_SYMBOL(__set_fs);
25136
25137-/**
25138- * copy_from_user: - Copy a block of data from user space.
25139- * @to: Destination address, in kernel space.
25140- * @from: Source address, in user space.
25141- * @n: Number of bytes to copy.
25142- *
25143- * Context: User context only. This function may sleep.
25144- *
25145- * Copy data from user space to kernel space.
25146- *
25147- * Returns number of bytes that could not be copied.
25148- * On success, this will be zero.
25149- *
25150- * If some data could not be copied, this function will pad the copied
25151- * data to the requested size using zero bytes.
25152- */
25153-unsigned long
25154-copy_from_user(void *to, const void __user *from, unsigned long n)
25155+void set_fs(mm_segment_t x)
25156 {
25157- if (access_ok(VERIFY_READ, from, n))
25158- n = __copy_from_user(to, from, n);
25159- else
25160- memset(to, 0, n);
25161- return n;
25162+ current_thread_info()->addr_limit = x;
25163+ __set_fs(x);
25164 }
25165-EXPORT_SYMBOL(copy_from_user);
25166+EXPORT_SYMBOL(set_fs);
25167+#endif
25168diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
25169index b7c2849..bab76d3 100644
25170--- a/arch/x86/lib/usercopy_64.c
25171+++ b/arch/x86/lib/usercopy_64.c
25172@@ -39,16 +39,22 @@ do { \
25173 } while (0)
25174
25175 long
25176-__strncpy_from_user(char *dst, const char __user *src, long count)
25177+__strncpy_from_user(char *dst, const char __user *src, unsigned long count)
25178 {
25179 long res;
25180+
25181+#ifdef CONFIG_PAX_MEMORY_UDEREF
25182+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
25183+ src += PAX_USER_SHADOW_BASE;
25184+#endif
25185+
25186 __do_strncpy_from_user(dst, src, count, res);
25187 return res;
25188 }
25189 EXPORT_SYMBOL(__strncpy_from_user);
25190
25191 long
25192-strncpy_from_user(char *dst, const char __user *src, long count)
25193+strncpy_from_user(char *dst, const char __user *src, unsigned long count)
25194 {
25195 long res = -EFAULT;
25196 if (access_ok(VERIFY_READ, src, 1))
25197@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
25198 {
25199 long __d0;
25200 might_fault();
25201+
25202+#ifdef CONFIG_PAX_MEMORY_UDEREF
25203+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
25204+ addr += PAX_USER_SHADOW_BASE;
25205+#endif
25206+
25207 /* no memory constraint because it doesn't change any memory gcc knows
25208 about */
25209 asm volatile(
25210@@ -107,7 +119,7 @@ EXPORT_SYMBOL(clear_user);
25211 * Return 0 on exception, a value greater than N if too long
25212 */
25213
25214-long __strnlen_user(const char __user *s, long n)
25215+long __strnlen_user(const char __user *s, unsigned long n)
25216 {
25217 long res = 0;
25218 char c;
25219@@ -125,7 +137,7 @@ long __strnlen_user(const char __user *s, long n)
25220 }
25221 EXPORT_SYMBOL(__strnlen_user);
25222
25223-long strnlen_user(const char __user *s, long n)
25224+long strnlen_user(const char __user *s, unsigned long n)
25225 {
25226 if (!access_ok(VERIFY_READ, s, 1))
25227 return 0;
25228@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
25229 }
25230 EXPORT_SYMBOL(strlen_user);
25231
25232-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
25233+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
25234 {
25235- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25236- return copy_user_generic((__force void *)to, (__force void *)from, len);
25237- }
25238- return len;
25239+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25240+
25241+#ifdef CONFIG_PAX_MEMORY_UDEREF
25242+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
25243+ to += PAX_USER_SHADOW_BASE;
25244+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
25245+ from += PAX_USER_SHADOW_BASE;
25246+#endif
25247+
25248+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
25249+ }
25250+ return len;
25251 }
25252 EXPORT_SYMBOL(copy_in_user);
25253
25254@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
25255 * it is not necessary to optimize tail handling.
25256 */
25257 unsigned long
25258-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
25259+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
25260 {
25261 char c;
25262 unsigned zero_len;
25263diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
25264index 61b41ca..5fef66a 100644
25265--- a/arch/x86/mm/extable.c
25266+++ b/arch/x86/mm/extable.c
25267@@ -1,14 +1,71 @@
25268 #include <linux/module.h>
25269 #include <linux/spinlock.h>
25270+#include <linux/sort.h>
25271 #include <asm/uaccess.h>
25272+#include <asm/pgtable.h>
25273
25274+/*
25275+ * The exception table needs to be sorted so that the binary
25276+ * search that we use to find entries in it works properly.
25277+ * This is used both for the kernel exception table and for
25278+ * the exception tables of modules that get loaded.
25279+ */
25280+static int cmp_ex(const void *a, const void *b)
25281+{
25282+ const struct exception_table_entry *x = a, *y = b;
25283+
25284+ /* avoid overflow */
25285+ if (x->insn > y->insn)
25286+ return 1;
25287+ if (x->insn < y->insn)
25288+ return -1;
25289+ return 0;
25290+}
25291+
25292+static void swap_ex(void *a, void *b, int size)
25293+{
25294+ struct exception_table_entry t, *x = a, *y = b;
25295+
25296+ t = *x;
25297+
25298+ pax_open_kernel();
25299+ *x = *y;
25300+ *y = t;
25301+ pax_close_kernel();
25302+}
25303+
25304+void sort_extable(struct exception_table_entry *start,
25305+ struct exception_table_entry *finish)
25306+{
25307+ sort(start, finish - start, sizeof(struct exception_table_entry),
25308+ cmp_ex, swap_ex);
25309+}
25310+
25311+#ifdef CONFIG_MODULES
25312+/*
25313+ * If the exception table is sorted, any referring to the module init
25314+ * will be at the beginning or the end.
25315+ */
25316+void trim_init_extable(struct module *m)
25317+{
25318+ /*trim the beginning*/
25319+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
25320+ m->extable++;
25321+ m->num_exentries--;
25322+ }
25323+ /*trim the end*/
25324+ while (m->num_exentries &&
25325+ within_module_init(m->extable[m->num_exentries-1].insn, m))
25326+ m->num_exentries--;
25327+}
25328+#endif /* CONFIG_MODULES */
25329
25330 int fixup_exception(struct pt_regs *regs)
25331 {
25332 const struct exception_table_entry *fixup;
25333
25334 #ifdef CONFIG_PNPBIOS
25335- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
25336+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
25337 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
25338 extern u32 pnp_bios_is_utter_crap;
25339 pnp_bios_is_utter_crap = 1;
25340diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25341index 8ac0d76..ca501e2 100644
25342--- a/arch/x86/mm/fault.c
25343+++ b/arch/x86/mm/fault.c
25344@@ -11,10 +11,19 @@
25345 #include <linux/kprobes.h> /* __kprobes, ... */
25346 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
25347 #include <linux/perf_event.h> /* perf_sw_event */
25348+#include <linux/unistd.h>
25349+#include <linux/compiler.h>
25350
25351 #include <asm/traps.h> /* dotraplinkage, ... */
25352 #include <asm/pgalloc.h> /* pgd_*(), ... */
25353 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25354+#include <asm/vsyscall.h>
25355+#include <asm/tlbflush.h>
25356+
25357+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25358+#include <asm/stacktrace.h>
25359+#include "../kernel/dumpstack.h"
25360+#endif
25361
25362 /*
25363 * Page fault error code bits:
25364@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
25365 int ret = 0;
25366
25367 /* kprobe_running() needs smp_processor_id() */
25368- if (kprobes_built_in() && !user_mode_vm(regs)) {
25369+ if (kprobes_built_in() && !user_mode(regs)) {
25370 preempt_disable();
25371 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25372 ret = 1;
25373@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25374 return !instr_lo || (instr_lo>>1) == 1;
25375 case 0x00:
25376 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25377- if (probe_kernel_address(instr, opcode))
25378+ if (user_mode(regs)) {
25379+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25380+ return 0;
25381+ } else if (probe_kernel_address(instr, opcode))
25382 return 0;
25383
25384 *prefetch = (instr_lo == 0xF) &&
25385@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25386 while (instr < max_instr) {
25387 unsigned char opcode;
25388
25389- if (probe_kernel_address(instr, opcode))
25390+ if (user_mode(regs)) {
25391+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25392+ break;
25393+ } else if (probe_kernel_address(instr, opcode))
25394 break;
25395
25396 instr++;
25397@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25398 force_sig_info(si_signo, &info, tsk);
25399 }
25400
25401+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25402+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25403+#endif
25404+
25405+#ifdef CONFIG_PAX_EMUTRAMP
25406+static int pax_handle_fetch_fault(struct pt_regs *regs);
25407+#endif
25408+
25409+#ifdef CONFIG_PAX_PAGEEXEC
25410+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25411+{
25412+ pgd_t *pgd;
25413+ pud_t *pud;
25414+ pmd_t *pmd;
25415+
25416+ pgd = pgd_offset(mm, address);
25417+ if (!pgd_present(*pgd))
25418+ return NULL;
25419+ pud = pud_offset(pgd, address);
25420+ if (!pud_present(*pud))
25421+ return NULL;
25422+ pmd = pmd_offset(pud, address);
25423+ if (!pmd_present(*pmd))
25424+ return NULL;
25425+ return pmd;
25426+}
25427+#endif
25428+
25429 DEFINE_SPINLOCK(pgd_lock);
25430 LIST_HEAD(pgd_list);
25431
25432@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
25433 address += PMD_SIZE) {
25434
25435 unsigned long flags;
25436+
25437+#ifdef CONFIG_PAX_PER_CPU_PGD
25438+ unsigned long cpu;
25439+#else
25440 struct page *page;
25441+#endif
25442
25443 spin_lock_irqsave(&pgd_lock, flags);
25444+
25445+#ifdef CONFIG_PAX_PER_CPU_PGD
25446+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25447+ pgd_t *pgd = get_cpu_pgd(cpu);
25448+#else
25449 list_for_each_entry(page, &pgd_list, lru) {
25450- if (!vmalloc_sync_one(page_address(page), address))
25451+ pgd_t *pgd = page_address(page);
25452+#endif
25453+
25454+ if (!vmalloc_sync_one(pgd, address))
25455 break;
25456 }
25457 spin_unlock_irqrestore(&pgd_lock, flags);
25458@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
25459 * an interrupt in the middle of a task switch..
25460 */
25461 pgd_paddr = read_cr3();
25462+
25463+#ifdef CONFIG_PAX_PER_CPU_PGD
25464+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25465+#endif
25466+
25467 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25468 if (!pmd_k)
25469 return -1;
25470@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
25471
25472 const pgd_t *pgd_ref = pgd_offset_k(address);
25473 unsigned long flags;
25474+
25475+#ifdef CONFIG_PAX_PER_CPU_PGD
25476+ unsigned long cpu;
25477+#else
25478 struct page *page;
25479+#endif
25480
25481 if (pgd_none(*pgd_ref))
25482 continue;
25483
25484 spin_lock_irqsave(&pgd_lock, flags);
25485+
25486+#ifdef CONFIG_PAX_PER_CPU_PGD
25487+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25488+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25489+#else
25490 list_for_each_entry(page, &pgd_list, lru) {
25491 pgd_t *pgd;
25492 pgd = (pgd_t *)page_address(page) + pgd_index(address);
25493+#endif
25494+
25495 if (pgd_none(*pgd))
25496 set_pgd(pgd, *pgd_ref);
25497 else
25498@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
25499 * happen within a race in page table update. In the later
25500 * case just flush:
25501 */
25502+
25503+#ifdef CONFIG_PAX_PER_CPU_PGD
25504+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25505+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25506+#else
25507 pgd = pgd_offset(current->active_mm, address);
25508+#endif
25509+
25510 pgd_ref = pgd_offset_k(address);
25511 if (pgd_none(*pgd_ref))
25512 return -1;
25513@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25514 static int is_errata100(struct pt_regs *regs, unsigned long address)
25515 {
25516 #ifdef CONFIG_X86_64
25517- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25518+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25519 return 1;
25520 #endif
25521 return 0;
25522@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25523 }
25524
25525 static const char nx_warning[] = KERN_CRIT
25526-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25527+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25528
25529 static void
25530 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25531@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25532 if (!oops_may_print())
25533 return;
25534
25535- if (error_code & PF_INSTR) {
25536+ if (nx_enabled && (error_code & PF_INSTR)) {
25537 unsigned int level;
25538
25539 pte_t *pte = lookup_address(address, &level);
25540
25541 if (pte && pte_present(*pte) && !pte_exec(*pte))
25542- printk(nx_warning, current_uid());
25543+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
25544 }
25545
25546+#ifdef CONFIG_PAX_KERNEXEC
25547+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25548+ if (current->signal->curr_ip)
25549+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25550+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25551+ else
25552+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25553+ current->comm, task_pid_nr(current), current_uid(), current_euid());
25554+ }
25555+#endif
25556+
25557 printk(KERN_ALERT "BUG: unable to handle kernel ");
25558 if (address < PAGE_SIZE)
25559 printk(KERN_CONT "NULL pointer dereference");
25560@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25561 {
25562 struct task_struct *tsk = current;
25563
25564+#ifdef CONFIG_X86_64
25565+ struct mm_struct *mm = tsk->mm;
25566+
25567+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
25568+ if (regs->ip == (unsigned long)vgettimeofday) {
25569+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
25570+ return;
25571+ } else if (regs->ip == (unsigned long)vtime) {
25572+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
25573+ return;
25574+ } else if (regs->ip == (unsigned long)vgetcpu) {
25575+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
25576+ return;
25577+ }
25578+ }
25579+#endif
25580+
25581 /* User mode accesses just cause a SIGSEGV */
25582 if (error_code & PF_USER) {
25583 /*
25584@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25585 if (is_errata100(regs, address))
25586 return;
25587
25588+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25589+ if (pax_is_fetch_fault(regs, error_code, address)) {
25590+
25591+#ifdef CONFIG_PAX_EMUTRAMP
25592+ switch (pax_handle_fetch_fault(regs)) {
25593+ case 2:
25594+ return;
25595+ }
25596+#endif
25597+
25598+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25599+ do_group_exit(SIGKILL);
25600+ }
25601+#endif
25602+
25603 if (unlikely(show_unhandled_signals))
25604 show_signal_msg(regs, error_code, address, tsk);
25605
25606@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25607 if (fault & VM_FAULT_HWPOISON) {
25608 printk(KERN_ERR
25609 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25610- tsk->comm, tsk->pid, address);
25611+ tsk->comm, task_pid_nr(tsk), address);
25612 code = BUS_MCEERR_AR;
25613 }
25614 #endif
25615@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25616 return 1;
25617 }
25618
25619+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25620+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25621+{
25622+ pte_t *pte;
25623+ pmd_t *pmd;
25624+ spinlock_t *ptl;
25625+ unsigned char pte_mask;
25626+
25627+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25628+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25629+ return 0;
25630+
25631+ /* PaX: it's our fault, let's handle it if we can */
25632+
25633+ /* PaX: take a look at read faults before acquiring any locks */
25634+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25635+ /* instruction fetch attempt from a protected page in user mode */
25636+ up_read(&mm->mmap_sem);
25637+
25638+#ifdef CONFIG_PAX_EMUTRAMP
25639+ switch (pax_handle_fetch_fault(regs)) {
25640+ case 2:
25641+ return 1;
25642+ }
25643+#endif
25644+
25645+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25646+ do_group_exit(SIGKILL);
25647+ }
25648+
25649+ pmd = pax_get_pmd(mm, address);
25650+ if (unlikely(!pmd))
25651+ return 0;
25652+
25653+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25654+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25655+ pte_unmap_unlock(pte, ptl);
25656+ return 0;
25657+ }
25658+
25659+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25660+ /* write attempt to a protected page in user mode */
25661+ pte_unmap_unlock(pte, ptl);
25662+ return 0;
25663+ }
25664+
25665+#ifdef CONFIG_SMP
25666+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25667+#else
25668+ if (likely(address > get_limit(regs->cs)))
25669+#endif
25670+ {
25671+ set_pte(pte, pte_mkread(*pte));
25672+ __flush_tlb_one(address);
25673+ pte_unmap_unlock(pte, ptl);
25674+ up_read(&mm->mmap_sem);
25675+ return 1;
25676+ }
25677+
25678+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25679+
25680+ /*
25681+ * PaX: fill DTLB with user rights and retry
25682+ */
25683+ __asm__ __volatile__ (
25684+ "orb %2,(%1)\n"
25685+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25686+/*
25687+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
25688+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25689+ * page fault when examined during a TLB load attempt. this is true not only
25690+ * for PTEs holding a non-present entry but also present entries that will
25691+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25692+ * mechanism). in effect it means that we do *not* need to flush the TLBs
25693+ * for our target pages since their PTEs are simply not in the TLBs at all.
25694+
25695+ * the best thing in omitting it is that we gain around 15-20% speed in the
25696+ * fast path of the page fault handler and can get rid of tracing since we
25697+ * can no longer flush unintended entries.
25698+ */
25699+ "invlpg (%0)\n"
25700+#endif
25701+ __copyuser_seg"testb $0,(%0)\n"
25702+ "xorb %3,(%1)\n"
25703+ :
25704+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25705+ : "memory", "cc");
25706+ pte_unmap_unlock(pte, ptl);
25707+ up_read(&mm->mmap_sem);
25708+ return 1;
25709+}
25710+#endif
25711+
25712 /*
25713 * Handle a spurious fault caused by a stale TLB entry.
25714 *
25715@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
25716 static inline int
25717 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
25718 {
25719+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25720+ return 1;
25721+
25722 if (write) {
25723 /* write, present and write, not present: */
25724 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25725@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25726 {
25727 struct vm_area_struct *vma;
25728 struct task_struct *tsk;
25729- unsigned long address;
25730 struct mm_struct *mm;
25731 int write;
25732 int fault;
25733
25734- tsk = current;
25735- mm = tsk->mm;
25736-
25737 /* Get the faulting address: */
25738- address = read_cr2();
25739+ unsigned long address = read_cr2();
25740+
25741+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25742+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25743+ if (!search_exception_tables(regs->ip)) {
25744+ bad_area_nosemaphore(regs, error_code, address);
25745+ return;
25746+ }
25747+ if (address < PAX_USER_SHADOW_BASE) {
25748+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25749+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
25750+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25751+ } else
25752+ address -= PAX_USER_SHADOW_BASE;
25753+ }
25754+#endif
25755+
25756+ tsk = current;
25757+ mm = tsk->mm;
25758
25759 /*
25760 * Detect and handle instructions that would cause a page fault for
25761@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25762 * User-mode registers count as a user access even for any
25763 * potential system fault or CPU buglet:
25764 */
25765- if (user_mode_vm(regs)) {
25766+ if (user_mode(regs)) {
25767 local_irq_enable();
25768 error_code |= PF_USER;
25769 } else {
25770@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25771 might_sleep();
25772 }
25773
25774+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25775+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25776+ return;
25777+#endif
25778+
25779 vma = find_vma(mm, address);
25780 if (unlikely(!vma)) {
25781 bad_area(regs, error_code, address);
25782@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25783 bad_area(regs, error_code, address);
25784 return;
25785 }
25786- if (error_code & PF_USER) {
25787- /*
25788- * Accessing the stack below %sp is always a bug.
25789- * The large cushion allows instructions like enter
25790- * and pusha to work. ("enter $65535, $31" pushes
25791- * 32 pointers and then decrements %sp by 65535.)
25792- */
25793- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25794- bad_area(regs, error_code, address);
25795- return;
25796- }
25797+ /*
25798+ * Accessing the stack below %sp is always a bug.
25799+ * The large cushion allows instructions like enter
25800+ * and pusha to work. ("enter $65535, $31" pushes
25801+ * 32 pointers and then decrements %sp by 65535.)
25802+ */
25803+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25804+ bad_area(regs, error_code, address);
25805+ return;
25806 }
25807+
25808+#ifdef CONFIG_PAX_SEGMEXEC
25809+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25810+ bad_area(regs, error_code, address);
25811+ return;
25812+ }
25813+#endif
25814+
25815 if (unlikely(expand_stack(vma, address))) {
25816 bad_area(regs, error_code, address);
25817 return;
25818@@ -1146,3 +1390,292 @@ good_area:
25819
25820 up_read(&mm->mmap_sem);
25821 }
25822+
25823+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25824+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25825+{
25826+ struct mm_struct *mm = current->mm;
25827+ unsigned long ip = regs->ip;
25828+
25829+ if (v8086_mode(regs))
25830+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25831+
25832+#ifdef CONFIG_PAX_PAGEEXEC
25833+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25834+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25835+ return true;
25836+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25837+ return true;
25838+ return false;
25839+ }
25840+#endif
25841+
25842+#ifdef CONFIG_PAX_SEGMEXEC
25843+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25844+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25845+ return true;
25846+ return false;
25847+ }
25848+#endif
25849+
25850+ return false;
25851+}
25852+#endif
25853+
25854+#ifdef CONFIG_PAX_EMUTRAMP
25855+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25856+{
25857+ int err;
25858+
25859+ do { /* PaX: libffi trampoline emulation */
25860+ unsigned char mov, jmp;
25861+ unsigned int addr1, addr2;
25862+
25863+#ifdef CONFIG_X86_64
25864+ if ((regs->ip + 9) >> 32)
25865+ break;
25866+#endif
25867+
25868+ err = get_user(mov, (unsigned char __user *)regs->ip);
25869+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25870+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25871+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25872+
25873+ if (err)
25874+ break;
25875+
25876+ if (mov == 0xB8 && jmp == 0xE9) {
25877+ regs->ax = addr1;
25878+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25879+ return 2;
25880+ }
25881+ } while (0);
25882+
25883+ do { /* PaX: gcc trampoline emulation #1 */
25884+ unsigned char mov1, mov2;
25885+ unsigned short jmp;
25886+ unsigned int addr1, addr2;
25887+
25888+#ifdef CONFIG_X86_64
25889+ if ((regs->ip + 11) >> 32)
25890+ break;
25891+#endif
25892+
25893+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25894+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25895+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25896+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25897+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25898+
25899+ if (err)
25900+ break;
25901+
25902+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25903+ regs->cx = addr1;
25904+ regs->ax = addr2;
25905+ regs->ip = addr2;
25906+ return 2;
25907+ }
25908+ } while (0);
25909+
25910+ do { /* PaX: gcc trampoline emulation #2 */
25911+ unsigned char mov, jmp;
25912+ unsigned int addr1, addr2;
25913+
25914+#ifdef CONFIG_X86_64
25915+ if ((regs->ip + 9) >> 32)
25916+ break;
25917+#endif
25918+
25919+ err = get_user(mov, (unsigned char __user *)regs->ip);
25920+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25921+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25922+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25923+
25924+ if (err)
25925+ break;
25926+
25927+ if (mov == 0xB9 && jmp == 0xE9) {
25928+ regs->cx = addr1;
25929+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25930+ return 2;
25931+ }
25932+ } while (0);
25933+
25934+ return 1; /* PaX in action */
25935+}
25936+
25937+#ifdef CONFIG_X86_64
25938+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25939+{
25940+ int err;
25941+
25942+ do { /* PaX: libffi trampoline emulation */
25943+ unsigned short mov1, mov2, jmp1;
25944+ unsigned char stcclc, jmp2;
25945+ unsigned long addr1, addr2;
25946+
25947+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25948+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25949+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25950+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25951+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25952+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25953+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25954+
25955+ if (err)
25956+ break;
25957+
25958+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25959+ regs->r11 = addr1;
25960+ regs->r10 = addr2;
25961+ if (stcclc == 0xF8)
25962+ regs->flags &= ~X86_EFLAGS_CF;
25963+ else
25964+ regs->flags |= X86_EFLAGS_CF;
25965+ regs->ip = addr1;
25966+ return 2;
25967+ }
25968+ } while (0);
25969+
25970+ do { /* PaX: gcc trampoline emulation #1 */
25971+ unsigned short mov1, mov2, jmp1;
25972+ unsigned char jmp2;
25973+ unsigned int addr1;
25974+ unsigned long addr2;
25975+
25976+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25977+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25978+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25979+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25980+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25981+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25982+
25983+ if (err)
25984+ break;
25985+
25986+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25987+ regs->r11 = addr1;
25988+ regs->r10 = addr2;
25989+ regs->ip = addr1;
25990+ return 2;
25991+ }
25992+ } while (0);
25993+
25994+ do { /* PaX: gcc trampoline emulation #2 */
25995+ unsigned short mov1, mov2, jmp1;
25996+ unsigned char jmp2;
25997+ unsigned long addr1, addr2;
25998+
25999+ err = get_user(mov1, (unsigned short __user *)regs->ip);
26000+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
26001+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
26002+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
26003+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
26004+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
26005+
26006+ if (err)
26007+ break;
26008+
26009+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26010+ regs->r11 = addr1;
26011+ regs->r10 = addr2;
26012+ regs->ip = addr1;
26013+ return 2;
26014+ }
26015+ } while (0);
26016+
26017+ return 1; /* PaX in action */
26018+}
26019+#endif
26020+
26021+/*
26022+ * PaX: decide what to do with offenders (regs->ip = fault address)
26023+ *
26024+ * returns 1 when task should be killed
26025+ * 2 when gcc trampoline was detected
26026+ */
26027+static int pax_handle_fetch_fault(struct pt_regs *regs)
26028+{
26029+ if (v8086_mode(regs))
26030+ return 1;
26031+
26032+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
26033+ return 1;
26034+
26035+#ifdef CONFIG_X86_32
26036+ return pax_handle_fetch_fault_32(regs);
26037+#else
26038+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
26039+ return pax_handle_fetch_fault_32(regs);
26040+ else
26041+ return pax_handle_fetch_fault_64(regs);
26042+#endif
26043+}
26044+#endif
26045+
26046+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26047+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
26048+{
26049+ long i;
26050+
26051+ printk(KERN_ERR "PAX: bytes at PC: ");
26052+ for (i = 0; i < 20; i++) {
26053+ unsigned char c;
26054+ if (get_user(c, (unsigned char __force_user *)pc+i))
26055+ printk(KERN_CONT "?? ");
26056+ else
26057+ printk(KERN_CONT "%02x ", c);
26058+ }
26059+ printk("\n");
26060+
26061+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
26062+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
26063+ unsigned long c;
26064+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
26065+#ifdef CONFIG_X86_32
26066+ printk(KERN_CONT "???????? ");
26067+#else
26068+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
26069+ printk(KERN_CONT "???????? ???????? ");
26070+ else
26071+ printk(KERN_CONT "???????????????? ");
26072+#endif
26073+ } else {
26074+#ifdef CONFIG_X86_64
26075+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
26076+ printk(KERN_CONT "%08x ", (unsigned int)c);
26077+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
26078+ } else
26079+#endif
26080+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
26081+ }
26082+ }
26083+ printk("\n");
26084+}
26085+#endif
26086+
26087+/**
26088+ * probe_kernel_write(): safely attempt to write to a location
26089+ * @dst: address to write to
26090+ * @src: pointer to the data that shall be written
26091+ * @size: size of the data chunk
26092+ *
26093+ * Safely write to address @dst from the buffer at @src. If a kernel fault
26094+ * happens, handle that and return -EFAULT.
26095+ */
26096+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
26097+{
26098+ long ret;
26099+ mm_segment_t old_fs = get_fs();
26100+
26101+ set_fs(KERNEL_DS);
26102+ pagefault_disable();
26103+ pax_open_kernel();
26104+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
26105+ pax_close_kernel();
26106+ pagefault_enable();
26107+ set_fs(old_fs);
26108+
26109+ return ret ? -EFAULT : 0;
26110+}
26111diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
26112index 71da1bc..7a16bf4 100644
26113--- a/arch/x86/mm/gup.c
26114+++ b/arch/x86/mm/gup.c
26115@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
26116 addr = start;
26117 len = (unsigned long) nr_pages << PAGE_SHIFT;
26118 end = start + len;
26119- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26120+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26121 (void __user *)start, len)))
26122 return 0;
26123
26124diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
26125index 63a6ba6..79abd7a 100644
26126--- a/arch/x86/mm/highmem_32.c
26127+++ b/arch/x86/mm/highmem_32.c
26128@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
26129 idx = type + KM_TYPE_NR*smp_processor_id();
26130 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26131 BUG_ON(!pte_none(*(kmap_pte-idx)));
26132+
26133+ pax_open_kernel();
26134 set_pte(kmap_pte-idx, mk_pte(page, prot));
26135+ pax_close_kernel();
26136
26137 return (void *)vaddr;
26138 }
26139diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
26140index f46c3407..6ff9a26 100644
26141--- a/arch/x86/mm/hugetlbpage.c
26142+++ b/arch/x86/mm/hugetlbpage.c
26143@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
26144 struct hstate *h = hstate_file(file);
26145 struct mm_struct *mm = current->mm;
26146 struct vm_area_struct *vma;
26147- unsigned long start_addr;
26148+ unsigned long start_addr, pax_task_size = TASK_SIZE;
26149+
26150+#ifdef CONFIG_PAX_SEGMEXEC
26151+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26152+ pax_task_size = SEGMEXEC_TASK_SIZE;
26153+#endif
26154+
26155+ pax_task_size -= PAGE_SIZE;
26156
26157 if (len > mm->cached_hole_size) {
26158- start_addr = mm->free_area_cache;
26159+ start_addr = mm->free_area_cache;
26160 } else {
26161- start_addr = TASK_UNMAPPED_BASE;
26162- mm->cached_hole_size = 0;
26163+ start_addr = mm->mmap_base;
26164+ mm->cached_hole_size = 0;
26165 }
26166
26167 full_search:
26168@@ -281,26 +288,27 @@ full_search:
26169
26170 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
26171 /* At this point: (!vma || addr < vma->vm_end). */
26172- if (TASK_SIZE - len < addr) {
26173+ if (pax_task_size - len < addr) {
26174 /*
26175 * Start a new search - just in case we missed
26176 * some holes.
26177 */
26178- if (start_addr != TASK_UNMAPPED_BASE) {
26179- start_addr = TASK_UNMAPPED_BASE;
26180+ if (start_addr != mm->mmap_base) {
26181+ start_addr = mm->mmap_base;
26182 mm->cached_hole_size = 0;
26183 goto full_search;
26184 }
26185 return -ENOMEM;
26186 }
26187- if (!vma || addr + len <= vma->vm_start) {
26188- mm->free_area_cache = addr + len;
26189- return addr;
26190- }
26191+ if (check_heap_stack_gap(vma, addr, len))
26192+ break;
26193 if (addr + mm->cached_hole_size < vma->vm_start)
26194 mm->cached_hole_size = vma->vm_start - addr;
26195 addr = ALIGN(vma->vm_end, huge_page_size(h));
26196 }
26197+
26198+ mm->free_area_cache = addr + len;
26199+ return addr;
26200 }
26201
26202 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26203@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26204 {
26205 struct hstate *h = hstate_file(file);
26206 struct mm_struct *mm = current->mm;
26207- struct vm_area_struct *vma, *prev_vma;
26208- unsigned long base = mm->mmap_base, addr = addr0;
26209+ struct vm_area_struct *vma;
26210+ unsigned long base = mm->mmap_base, addr;
26211 unsigned long largest_hole = mm->cached_hole_size;
26212- int first_time = 1;
26213
26214 /* don't allow allocations above current base */
26215 if (mm->free_area_cache > base)
26216@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26217 largest_hole = 0;
26218 mm->free_area_cache = base;
26219 }
26220-try_again:
26221+
26222 /* make sure it can fit in the remaining address space */
26223 if (mm->free_area_cache < len)
26224 goto fail;
26225
26226 /* either no address requested or cant fit in requested address hole */
26227- addr = (mm->free_area_cache - len) & huge_page_mask(h);
26228+ addr = (mm->free_area_cache - len);
26229 do {
26230+ addr &= huge_page_mask(h);
26231+ vma = find_vma(mm, addr);
26232 /*
26233 * Lookup failure means no vma is above this address,
26234 * i.e. return with success:
26235- */
26236- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
26237- return addr;
26238-
26239- /*
26240 * new region fits between prev_vma->vm_end and
26241 * vma->vm_start, use it:
26242 */
26243- if (addr + len <= vma->vm_start &&
26244- (!prev_vma || (addr >= prev_vma->vm_end))) {
26245+ if (check_heap_stack_gap(vma, addr, len)) {
26246 /* remember the address as a hint for next time */
26247- mm->cached_hole_size = largest_hole;
26248- return (mm->free_area_cache = addr);
26249- } else {
26250- /* pull free_area_cache down to the first hole */
26251- if (mm->free_area_cache == vma->vm_end) {
26252- mm->free_area_cache = vma->vm_start;
26253- mm->cached_hole_size = largest_hole;
26254- }
26255+ mm->cached_hole_size = largest_hole;
26256+ return (mm->free_area_cache = addr);
26257+ }
26258+ /* pull free_area_cache down to the first hole */
26259+ if (mm->free_area_cache == vma->vm_end) {
26260+ mm->free_area_cache = vma->vm_start;
26261+ mm->cached_hole_size = largest_hole;
26262 }
26263
26264 /* remember the largest hole we saw so far */
26265 if (addr + largest_hole < vma->vm_start)
26266- largest_hole = vma->vm_start - addr;
26267+ largest_hole = vma->vm_start - addr;
26268
26269 /* try just below the current vma->vm_start */
26270- addr = (vma->vm_start - len) & huge_page_mask(h);
26271- } while (len <= vma->vm_start);
26272+ addr = skip_heap_stack_gap(vma, len);
26273+ } while (!IS_ERR_VALUE(addr));
26274
26275 fail:
26276 /*
26277- * if hint left us with no space for the requested
26278- * mapping then try again:
26279- */
26280- if (first_time) {
26281- mm->free_area_cache = base;
26282- largest_hole = 0;
26283- first_time = 0;
26284- goto try_again;
26285- }
26286- /*
26287 * A failed mmap() very likely causes application failure,
26288 * so fall back to the bottom-up function here. This scenario
26289 * can happen with large stack limits and large mmap()
26290 * allocations.
26291 */
26292- mm->free_area_cache = TASK_UNMAPPED_BASE;
26293+
26294+#ifdef CONFIG_PAX_SEGMEXEC
26295+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26296+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
26297+ else
26298+#endif
26299+
26300+ mm->mmap_base = TASK_UNMAPPED_BASE;
26301+
26302+#ifdef CONFIG_PAX_RANDMMAP
26303+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26304+ mm->mmap_base += mm->delta_mmap;
26305+#endif
26306+
26307+ mm->free_area_cache = mm->mmap_base;
26308 mm->cached_hole_size = ~0UL;
26309 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
26310 len, pgoff, flags);
26311@@ -387,6 +393,7 @@ fail:
26312 /*
26313 * Restore the topdown base:
26314 */
26315+ mm->mmap_base = base;
26316 mm->free_area_cache = base;
26317 mm->cached_hole_size = ~0UL;
26318
26319@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26320 struct hstate *h = hstate_file(file);
26321 struct mm_struct *mm = current->mm;
26322 struct vm_area_struct *vma;
26323+ unsigned long pax_task_size = TASK_SIZE;
26324
26325 if (len & ~huge_page_mask(h))
26326 return -EINVAL;
26327- if (len > TASK_SIZE)
26328+
26329+#ifdef CONFIG_PAX_SEGMEXEC
26330+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26331+ pax_task_size = SEGMEXEC_TASK_SIZE;
26332+#endif
26333+
26334+ pax_task_size -= PAGE_SIZE;
26335+
26336+ if (len > pax_task_size)
26337 return -ENOMEM;
26338
26339 if (flags & MAP_FIXED) {
26340@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26341 if (addr) {
26342 addr = ALIGN(addr, huge_page_size(h));
26343 vma = find_vma(mm, addr);
26344- if (TASK_SIZE - len >= addr &&
26345- (!vma || addr + len <= vma->vm_start))
26346+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
26347 return addr;
26348 }
26349 if (mm->get_unmapped_area == arch_get_unmapped_area)
26350diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
26351index 73ffd55..f61c2a7 100644
26352--- a/arch/x86/mm/init.c
26353+++ b/arch/x86/mm/init.c
26354@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
26355 * cause a hotspot and fill up ZONE_DMA. The page tables
26356 * need roughly 0.5KB per GB.
26357 */
26358-#ifdef CONFIG_X86_32
26359- start = 0x7000;
26360-#else
26361- start = 0x8000;
26362-#endif
26363+ start = 0x100000;
26364 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
26365 tables, PAGE_SIZE);
26366 if (e820_table_start == -1UL)
26367@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26368 #endif
26369
26370 set_nx();
26371- if (nx_enabled)
26372+ if (nx_enabled && cpu_has_nx)
26373 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
26374
26375 /* Enable PSE if available */
26376@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26377 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26378 * mmio resources as well as potential bios/acpi data regions.
26379 */
26380+
26381 int devmem_is_allowed(unsigned long pagenr)
26382 {
26383+#ifdef CONFIG_GRKERNSEC_KMEM
26384+ /* allow BDA */
26385+ if (!pagenr)
26386+ return 1;
26387+ /* allow EBDA */
26388+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
26389+ return 1;
26390+ /* allow ISA/video mem */
26391+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26392+ return 1;
26393+ /* throw out everything else below 1MB */
26394+ if (pagenr <= 256)
26395+ return 0;
26396+#else
26397 if (pagenr <= 256)
26398 return 1;
26399+#endif
26400+
26401 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26402 return 0;
26403 if (!page_is_ram(pagenr))
26404@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26405
26406 void free_initmem(void)
26407 {
26408+
26409+#ifdef CONFIG_PAX_KERNEXEC
26410+#ifdef CONFIG_X86_32
26411+ /* PaX: limit KERNEL_CS to actual size */
26412+ unsigned long addr, limit;
26413+ struct desc_struct d;
26414+ int cpu;
26415+
26416+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26417+ limit = (limit - 1UL) >> PAGE_SHIFT;
26418+
26419+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26420+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26421+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26422+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26423+ }
26424+
26425+ /* PaX: make KERNEL_CS read-only */
26426+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26427+ if (!paravirt_enabled())
26428+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26429+/*
26430+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26431+ pgd = pgd_offset_k(addr);
26432+ pud = pud_offset(pgd, addr);
26433+ pmd = pmd_offset(pud, addr);
26434+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26435+ }
26436+*/
26437+#ifdef CONFIG_X86_PAE
26438+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26439+/*
26440+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26441+ pgd = pgd_offset_k(addr);
26442+ pud = pud_offset(pgd, addr);
26443+ pmd = pmd_offset(pud, addr);
26444+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26445+ }
26446+*/
26447+#endif
26448+
26449+#ifdef CONFIG_MODULES
26450+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26451+#endif
26452+
26453+#else
26454+ pgd_t *pgd;
26455+ pud_t *pud;
26456+ pmd_t *pmd;
26457+ unsigned long addr, end;
26458+
26459+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26460+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26461+ pgd = pgd_offset_k(addr);
26462+ pud = pud_offset(pgd, addr);
26463+ pmd = pmd_offset(pud, addr);
26464+ if (!pmd_present(*pmd))
26465+ continue;
26466+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26467+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26468+ else
26469+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26470+ }
26471+
26472+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26473+ end = addr + KERNEL_IMAGE_SIZE;
26474+ for (; addr < end; addr += PMD_SIZE) {
26475+ pgd = pgd_offset_k(addr);
26476+ pud = pud_offset(pgd, addr);
26477+ pmd = pmd_offset(pud, addr);
26478+ if (!pmd_present(*pmd))
26479+ continue;
26480+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26481+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26482+ }
26483+#endif
26484+
26485+ flush_tlb_all();
26486+#endif
26487+
26488 free_init_pages("unused kernel memory",
26489 (unsigned long)(&__init_begin),
26490 (unsigned long)(&__init_end));
26491diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26492index 30938c1..bda3d5d 100644
26493--- a/arch/x86/mm/init_32.c
26494+++ b/arch/x86/mm/init_32.c
26495@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
26496 }
26497
26498 /*
26499- * Creates a middle page table and puts a pointer to it in the
26500- * given global directory entry. This only returns the gd entry
26501- * in non-PAE compilation mode, since the middle layer is folded.
26502- */
26503-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26504-{
26505- pud_t *pud;
26506- pmd_t *pmd_table;
26507-
26508-#ifdef CONFIG_X86_PAE
26509- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26510- if (after_bootmem)
26511- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26512- else
26513- pmd_table = (pmd_t *)alloc_low_page();
26514- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26515- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26516- pud = pud_offset(pgd, 0);
26517- BUG_ON(pmd_table != pmd_offset(pud, 0));
26518-
26519- return pmd_table;
26520- }
26521-#endif
26522- pud = pud_offset(pgd, 0);
26523- pmd_table = pmd_offset(pud, 0);
26524-
26525- return pmd_table;
26526-}
26527-
26528-/*
26529 * Create a page table and place a pointer to it in a middle page
26530 * directory entry:
26531 */
26532@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26533 page_table = (pte_t *)alloc_low_page();
26534
26535 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26536+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26537+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26538+#else
26539 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26540+#endif
26541 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26542 }
26543
26544 return pte_offset_kernel(pmd, 0);
26545 }
26546
26547+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26548+{
26549+ pud_t *pud;
26550+ pmd_t *pmd_table;
26551+
26552+ pud = pud_offset(pgd, 0);
26553+ pmd_table = pmd_offset(pud, 0);
26554+
26555+ return pmd_table;
26556+}
26557+
26558 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26559 {
26560 int pgd_idx = pgd_index(vaddr);
26561@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26562 int pgd_idx, pmd_idx;
26563 unsigned long vaddr;
26564 pgd_t *pgd;
26565+ pud_t *pud;
26566 pmd_t *pmd;
26567 pte_t *pte = NULL;
26568
26569@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26570 pgd = pgd_base + pgd_idx;
26571
26572 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26573- pmd = one_md_table_init(pgd);
26574- pmd = pmd + pmd_index(vaddr);
26575+ pud = pud_offset(pgd, vaddr);
26576+ pmd = pmd_offset(pud, vaddr);
26577+
26578+#ifdef CONFIG_X86_PAE
26579+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26580+#endif
26581+
26582 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26583 pmd++, pmd_idx++) {
26584 pte = page_table_kmap_check(one_page_table_init(pmd),
26585@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26586 }
26587 }
26588
26589-static inline int is_kernel_text(unsigned long addr)
26590+static inline int is_kernel_text(unsigned long start, unsigned long end)
26591 {
26592- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
26593- return 1;
26594- return 0;
26595+ if ((start > ktla_ktva((unsigned long)_etext) ||
26596+ end <= ktla_ktva((unsigned long)_stext)) &&
26597+ (start > ktla_ktva((unsigned long)_einittext) ||
26598+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26599+
26600+#ifdef CONFIG_ACPI_SLEEP
26601+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26602+#endif
26603+
26604+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26605+ return 0;
26606+ return 1;
26607 }
26608
26609 /*
26610@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
26611 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
26612 unsigned long start_pfn, end_pfn;
26613 pgd_t *pgd_base = swapper_pg_dir;
26614- int pgd_idx, pmd_idx, pte_ofs;
26615+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26616 unsigned long pfn;
26617 pgd_t *pgd;
26618+ pud_t *pud;
26619 pmd_t *pmd;
26620 pte_t *pte;
26621 unsigned pages_2m, pages_4k;
26622@@ -278,8 +279,13 @@ repeat:
26623 pfn = start_pfn;
26624 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26625 pgd = pgd_base + pgd_idx;
26626- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26627- pmd = one_md_table_init(pgd);
26628+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26629+ pud = pud_offset(pgd, 0);
26630+ pmd = pmd_offset(pud, 0);
26631+
26632+#ifdef CONFIG_X86_PAE
26633+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26634+#endif
26635
26636 if (pfn >= end_pfn)
26637 continue;
26638@@ -291,14 +297,13 @@ repeat:
26639 #endif
26640 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26641 pmd++, pmd_idx++) {
26642- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26643+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26644
26645 /*
26646 * Map with big pages if possible, otherwise
26647 * create normal page tables:
26648 */
26649 if (use_pse) {
26650- unsigned int addr2;
26651 pgprot_t prot = PAGE_KERNEL_LARGE;
26652 /*
26653 * first pass will use the same initial
26654@@ -308,11 +313,7 @@ repeat:
26655 __pgprot(PTE_IDENT_ATTR |
26656 _PAGE_PSE);
26657
26658- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26659- PAGE_OFFSET + PAGE_SIZE-1;
26660-
26661- if (is_kernel_text(addr) ||
26662- is_kernel_text(addr2))
26663+ if (is_kernel_text(address, address + PMD_SIZE))
26664 prot = PAGE_KERNEL_LARGE_EXEC;
26665
26666 pages_2m++;
26667@@ -329,7 +330,7 @@ repeat:
26668 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26669 pte += pte_ofs;
26670 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26671- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26672+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26673 pgprot_t prot = PAGE_KERNEL;
26674 /*
26675 * first pass will use the same initial
26676@@ -337,7 +338,7 @@ repeat:
26677 */
26678 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26679
26680- if (is_kernel_text(addr))
26681+ if (is_kernel_text(address, address + PAGE_SIZE))
26682 prot = PAGE_KERNEL_EXEC;
26683
26684 pages_4k++;
26685@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
26686
26687 pud = pud_offset(pgd, va);
26688 pmd = pmd_offset(pud, va);
26689- if (!pmd_present(*pmd))
26690+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26691 break;
26692
26693 pte = pte_offset_kernel(pmd, va);
26694@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
26695
26696 static void __init pagetable_init(void)
26697 {
26698- pgd_t *pgd_base = swapper_pg_dir;
26699-
26700- permanent_kmaps_init(pgd_base);
26701+ permanent_kmaps_init(swapper_pg_dir);
26702 }
26703
26704 #ifdef CONFIG_ACPI_SLEEP
26705@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
26706 * ACPI suspend needs this for resume, because things like the intel-agp
26707 * driver might have split up a kernel 4MB mapping.
26708 */
26709-char swsusp_pg_dir[PAGE_SIZE]
26710+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
26711 __attribute__ ((aligned(PAGE_SIZE)));
26712
26713 static inline void save_pg_dir(void)
26714 {
26715- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
26716+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
26717 }
26718 #else /* !CONFIG_ACPI_SLEEP */
26719 static inline void save_pg_dir(void)
26720@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
26721 flush_tlb_all();
26722 }
26723
26724-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26725+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26726 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26727
26728 /* user-defined highmem size */
26729@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
26730 * Initialize the boot-time allocator (with low memory only):
26731 */
26732 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
26733- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26734+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26735 PAGE_SIZE);
26736 if (bootmap == -1L)
26737 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
26738@@ -864,6 +863,12 @@ void __init mem_init(void)
26739
26740 pci_iommu_alloc();
26741
26742+#ifdef CONFIG_PAX_PER_CPU_PGD
26743+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26744+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26745+ KERNEL_PGD_PTRS);
26746+#endif
26747+
26748 #ifdef CONFIG_FLATMEM
26749 BUG_ON(!mem_map);
26750 #endif
26751@@ -881,7 +886,7 @@ void __init mem_init(void)
26752 set_highmem_pages_init();
26753
26754 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26755- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26756+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26757 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26758
26759 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26760@@ -923,10 +928,10 @@ void __init mem_init(void)
26761 ((unsigned long)&__init_end -
26762 (unsigned long)&__init_begin) >> 10,
26763
26764- (unsigned long)&_etext, (unsigned long)&_edata,
26765- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26766+ (unsigned long)&_sdata, (unsigned long)&_edata,
26767+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26768
26769- (unsigned long)&_text, (unsigned long)&_etext,
26770+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26771 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26772
26773 /*
26774@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
26775 if (!kernel_set_to_readonly)
26776 return;
26777
26778+ start = ktla_ktva(start);
26779 pr_debug("Set kernel text: %lx - %lx for read write\n",
26780 start, start+size);
26781
26782@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
26783 if (!kernel_set_to_readonly)
26784 return;
26785
26786+ start = ktla_ktva(start);
26787 pr_debug("Set kernel text: %lx - %lx for read only\n",
26788 start, start+size);
26789
26790@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
26791 unsigned long start = PFN_ALIGN(_text);
26792 unsigned long size = PFN_ALIGN(_etext) - start;
26793
26794+ start = ktla_ktva(start);
26795 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26796 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26797 size >> 10);
26798diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26799index 7d095ad..25d2549 100644
26800--- a/arch/x86/mm/init_64.c
26801+++ b/arch/x86/mm/init_64.c
26802@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26803 pmd = fill_pmd(pud, vaddr);
26804 pte = fill_pte(pmd, vaddr);
26805
26806+ pax_open_kernel();
26807 set_pte(pte, new_pte);
26808+ pax_close_kernel();
26809
26810 /*
26811 * It's enough to flush this one mapping.
26812@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26813 pgd = pgd_offset_k((unsigned long)__va(phys));
26814 if (pgd_none(*pgd)) {
26815 pud = (pud_t *) spp_getpage();
26816- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26817- _PAGE_USER));
26818+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26819 }
26820 pud = pud_offset(pgd, (unsigned long)__va(phys));
26821 if (pud_none(*pud)) {
26822 pmd = (pmd_t *) spp_getpage();
26823- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26824- _PAGE_USER));
26825+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26826 }
26827 pmd = pmd_offset(pud, phys);
26828 BUG_ON(!pmd_none(*pmd));
26829@@ -675,6 +675,12 @@ void __init mem_init(void)
26830
26831 pci_iommu_alloc();
26832
26833+#ifdef CONFIG_PAX_PER_CPU_PGD
26834+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26835+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26836+ KERNEL_PGD_PTRS);
26837+#endif
26838+
26839 /* clear_bss() already clear the empty_zero_page */
26840
26841 reservedpages = 0;
26842@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
26843 static struct vm_area_struct gate_vma = {
26844 .vm_start = VSYSCALL_START,
26845 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26846- .vm_page_prot = PAGE_READONLY_EXEC,
26847- .vm_flags = VM_READ | VM_EXEC
26848+ .vm_page_prot = PAGE_READONLY,
26849+ .vm_flags = VM_READ
26850 };
26851
26852 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26853@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
26854
26855 const char *arch_vma_name(struct vm_area_struct *vma)
26856 {
26857- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26858+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26859 return "[vdso]";
26860 if (vma == &gate_vma)
26861 return "[vsyscall]";
26862diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26863index 84e236c..69bd3f6 100644
26864--- a/arch/x86/mm/iomap_32.c
26865+++ b/arch/x86/mm/iomap_32.c
26866@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
26867 debug_kmap_atomic(type);
26868 idx = type + KM_TYPE_NR * smp_processor_id();
26869 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26870+
26871+ pax_open_kernel();
26872 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26873+ pax_close_kernel();
26874+
26875 arch_flush_lazy_mmu_mode();
26876
26877 return (void *)vaddr;
26878diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26879index 2feb9bd..ab91e7b 100644
26880--- a/arch/x86/mm/ioremap.c
26881+++ b/arch/x86/mm/ioremap.c
26882@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
26883 * Second special case: Some BIOSen report the PC BIOS
26884 * area (640->1Mb) as ram even though it is not.
26885 */
26886- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
26887- pagenr < (BIOS_END >> PAGE_SHIFT))
26888+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
26889+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26890 return 0;
26891
26892 for (i = 0; i < e820.nr_map; i++) {
26893@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26894 /*
26895 * Don't allow anybody to remap normal RAM that we're using..
26896 */
26897- for (pfn = phys_addr >> PAGE_SHIFT;
26898- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
26899- pfn++) {
26900-
26901+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
26902 int is_ram = page_is_ram(pfn);
26903
26904- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26905+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26906 return NULL;
26907 WARN_ON_ONCE(is_ram);
26908 }
26909@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26910
26911 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26912 if (page_is_ram(start >> PAGE_SHIFT))
26913+#ifdef CONFIG_HIGHMEM
26914+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26915+#endif
26916 return __va(phys);
26917
26918 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
26919@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
26920 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26921
26922 static __initdata int after_paging_init;
26923-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26924+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26925
26926 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26927 {
26928@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
26929 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26930
26931 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26932- memset(bm_pte, 0, sizeof(bm_pte));
26933- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26934+ pmd_populate_user(&init_mm, pmd, bm_pte);
26935
26936 /*
26937 * The boot-ioremap range spans multiple pmds, for which
26938diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26939index 8cc1833..1abbc5b 100644
26940--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26941+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26942@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26943 * memory (e.g. tracked pages)? For now, we need this to avoid
26944 * invoking kmemcheck for PnP BIOS calls.
26945 */
26946- if (regs->flags & X86_VM_MASK)
26947+ if (v8086_mode(regs))
26948 return false;
26949- if (regs->cs != __KERNEL_CS)
26950+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26951 return false;
26952
26953 pte = kmemcheck_pte_lookup(address);
26954diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26955index c9e57af..07a321b 100644
26956--- a/arch/x86/mm/mmap.c
26957+++ b/arch/x86/mm/mmap.c
26958@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
26959 * Leave an at least ~128 MB hole with possible stack randomization.
26960 */
26961 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26962-#define MAX_GAP (TASK_SIZE/6*5)
26963+#define MAX_GAP (pax_task_size/6*5)
26964
26965 /*
26966 * True on X86_32 or when emulating IA32 on X86_64
26967@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
26968 return rnd << PAGE_SHIFT;
26969 }
26970
26971-static unsigned long mmap_base(void)
26972+static unsigned long mmap_base(struct mm_struct *mm)
26973 {
26974 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
26975+ unsigned long pax_task_size = TASK_SIZE;
26976+
26977+#ifdef CONFIG_PAX_SEGMEXEC
26978+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26979+ pax_task_size = SEGMEXEC_TASK_SIZE;
26980+#endif
26981
26982 if (gap < MIN_GAP)
26983 gap = MIN_GAP;
26984 else if (gap > MAX_GAP)
26985 gap = MAX_GAP;
26986
26987- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26988+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26989 }
26990
26991 /*
26992 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26993 * does, but not when emulating X86_32
26994 */
26995-static unsigned long mmap_legacy_base(void)
26996+static unsigned long mmap_legacy_base(struct mm_struct *mm)
26997 {
26998- if (mmap_is_ia32())
26999+ if (mmap_is_ia32()) {
27000+
27001+#ifdef CONFIG_PAX_SEGMEXEC
27002+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27003+ return SEGMEXEC_TASK_UNMAPPED_BASE;
27004+ else
27005+#endif
27006+
27007 return TASK_UNMAPPED_BASE;
27008- else
27009+ } else
27010 return TASK_UNMAPPED_BASE + mmap_rnd();
27011 }
27012
27013@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
27014 void arch_pick_mmap_layout(struct mm_struct *mm)
27015 {
27016 if (mmap_is_legacy()) {
27017- mm->mmap_base = mmap_legacy_base();
27018+ mm->mmap_base = mmap_legacy_base(mm);
27019+
27020+#ifdef CONFIG_PAX_RANDMMAP
27021+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27022+ mm->mmap_base += mm->delta_mmap;
27023+#endif
27024+
27025 mm->get_unmapped_area = arch_get_unmapped_area;
27026 mm->unmap_area = arch_unmap_area;
27027 } else {
27028- mm->mmap_base = mmap_base();
27029+ mm->mmap_base = mmap_base(mm);
27030+
27031+#ifdef CONFIG_PAX_RANDMMAP
27032+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27033+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
27034+#endif
27035+
27036 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
27037 mm->unmap_area = arch_unmap_area_topdown;
27038 }
27039diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
27040index 132772a..b961f11 100644
27041--- a/arch/x86/mm/mmio-mod.c
27042+++ b/arch/x86/mm/mmio-mod.c
27043@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
27044 break;
27045 default:
27046 {
27047- unsigned char *ip = (unsigned char *)instptr;
27048+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
27049 my_trace->opcode = MMIO_UNKNOWN_OP;
27050 my_trace->width = 0;
27051 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
27052@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
27053 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27054 void __iomem *addr)
27055 {
27056- static atomic_t next_id;
27057+ static atomic_unchecked_t next_id;
27058 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
27059 /* These are page-unaligned. */
27060 struct mmiotrace_map map = {
27061@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27062 .private = trace
27063 },
27064 .phys = offset,
27065- .id = atomic_inc_return(&next_id)
27066+ .id = atomic_inc_return_unchecked(&next_id)
27067 };
27068 map.map_id = trace->id;
27069
27070diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
27071index d253006..e56dd6a 100644
27072--- a/arch/x86/mm/numa_32.c
27073+++ b/arch/x86/mm/numa_32.c
27074@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
27075 }
27076 #endif
27077
27078-extern unsigned long find_max_low_pfn(void);
27079 extern unsigned long highend_pfn, highstart_pfn;
27080
27081 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
27082diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
27083index e1d1069..2251ff3 100644
27084--- a/arch/x86/mm/pageattr-test.c
27085+++ b/arch/x86/mm/pageattr-test.c
27086@@ -36,7 +36,7 @@ enum {
27087
27088 static int pte_testbit(pte_t pte)
27089 {
27090- return pte_flags(pte) & _PAGE_UNUSED1;
27091+ return pte_flags(pte) & _PAGE_CPA_TEST;
27092 }
27093
27094 struct split_state {
27095diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
27096index dd38bfb..b72c63e 100644
27097--- a/arch/x86/mm/pageattr.c
27098+++ b/arch/x86/mm/pageattr.c
27099@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27100 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
27101 */
27102 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
27103- pgprot_val(forbidden) |= _PAGE_NX;
27104+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27105
27106 /*
27107 * The kernel text needs to be executable for obvious reasons
27108 * Does not cover __inittext since that is gone later on. On
27109 * 64bit we do not enforce !NX on the low mapping
27110 */
27111- if (within(address, (unsigned long)_text, (unsigned long)_etext))
27112- pgprot_val(forbidden) |= _PAGE_NX;
27113+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
27114+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27115
27116+#ifdef CONFIG_DEBUG_RODATA
27117 /*
27118 * The .rodata section needs to be read-only. Using the pfn
27119 * catches all aliases.
27120@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27121 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
27122 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
27123 pgprot_val(forbidden) |= _PAGE_RW;
27124+#endif
27125+
27126+#ifdef CONFIG_PAX_KERNEXEC
27127+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
27128+ pgprot_val(forbidden) |= _PAGE_RW;
27129+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27130+ }
27131+#endif
27132
27133 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
27134
27135@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
27136 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
27137 {
27138 /* change init_mm */
27139+ pax_open_kernel();
27140 set_pte_atomic(kpte, pte);
27141+
27142 #ifdef CONFIG_X86_32
27143 if (!SHARED_KERNEL_PMD) {
27144+
27145+#ifdef CONFIG_PAX_PER_CPU_PGD
27146+ unsigned long cpu;
27147+#else
27148 struct page *page;
27149+#endif
27150
27151+#ifdef CONFIG_PAX_PER_CPU_PGD
27152+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27153+ pgd_t *pgd = get_cpu_pgd(cpu);
27154+#else
27155 list_for_each_entry(page, &pgd_list, lru) {
27156- pgd_t *pgd;
27157+ pgd_t *pgd = (pgd_t *)page_address(page);
27158+#endif
27159+
27160 pud_t *pud;
27161 pmd_t *pmd;
27162
27163- pgd = (pgd_t *)page_address(page) + pgd_index(address);
27164+ pgd += pgd_index(address);
27165 pud = pud_offset(pgd, address);
27166 pmd = pmd_offset(pud, address);
27167 set_pte_atomic((pte_t *)pmd, pte);
27168 }
27169 }
27170 #endif
27171+ pax_close_kernel();
27172 }
27173
27174 static int
27175diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
27176index e78cd0e..de0a817 100644
27177--- a/arch/x86/mm/pat.c
27178+++ b/arch/x86/mm/pat.c
27179@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
27180
27181 conflict:
27182 printk(KERN_INFO "%s:%d conflicting memory types "
27183- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
27184+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
27185 new->end, cattr_name(new->type), cattr_name(entry->type));
27186 return -EBUSY;
27187 }
27188@@ -559,7 +559,7 @@ unlock_ret:
27189
27190 if (err) {
27191 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
27192- current->comm, current->pid, start, end);
27193+ current->comm, task_pid_nr(current), start, end);
27194 }
27195
27196 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
27197@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27198 while (cursor < to) {
27199 if (!devmem_is_allowed(pfn)) {
27200 printk(KERN_INFO
27201- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27202- current->comm, from, to);
27203+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
27204+ current->comm, from, to, cursor);
27205 return 0;
27206 }
27207 cursor += PAGE_SIZE;
27208@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
27209 printk(KERN_INFO
27210 "%s:%d ioremap_change_attr failed %s "
27211 "for %Lx-%Lx\n",
27212- current->comm, current->pid,
27213+ current->comm, task_pid_nr(current),
27214 cattr_name(flags),
27215 base, (unsigned long long)(base + size));
27216 return -EINVAL;
27217@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27218 free_memtype(paddr, paddr + size);
27219 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
27220 " for %Lx-%Lx, got %s\n",
27221- current->comm, current->pid,
27222+ current->comm, task_pid_nr(current),
27223 cattr_name(want_flags),
27224 (unsigned long long)paddr,
27225 (unsigned long long)(paddr + size),
27226diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
27227index df3d5c8..c2223e1 100644
27228--- a/arch/x86/mm/pf_in.c
27229+++ b/arch/x86/mm/pf_in.c
27230@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
27231 int i;
27232 enum reason_type rv = OTHERS;
27233
27234- p = (unsigned char *)ins_addr;
27235+ p = (unsigned char *)ktla_ktva(ins_addr);
27236 p += skip_prefix(p, &prf);
27237 p += get_opcode(p, &opcode);
27238
27239@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
27240 struct prefix_bits prf;
27241 int i;
27242
27243- p = (unsigned char *)ins_addr;
27244+ p = (unsigned char *)ktla_ktva(ins_addr);
27245 p += skip_prefix(p, &prf);
27246 p += get_opcode(p, &opcode);
27247
27248@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27249 struct prefix_bits prf;
27250 int i;
27251
27252- p = (unsigned char *)ins_addr;
27253+ p = (unsigned char *)ktla_ktva(ins_addr);
27254 p += skip_prefix(p, &prf);
27255 p += get_opcode(p, &opcode);
27256
27257@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27258 int i;
27259 unsigned long rv;
27260
27261- p = (unsigned char *)ins_addr;
27262+ p = (unsigned char *)ktla_ktva(ins_addr);
27263 p += skip_prefix(p, &prf);
27264 p += get_opcode(p, &opcode);
27265 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27266@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27267 int i;
27268 unsigned long rv;
27269
27270- p = (unsigned char *)ins_addr;
27271+ p = (unsigned char *)ktla_ktva(ins_addr);
27272 p += skip_prefix(p, &prf);
27273 p += get_opcode(p, &opcode);
27274 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
27275diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
27276index e0e6fad..c56b495 100644
27277--- a/arch/x86/mm/pgtable.c
27278+++ b/arch/x86/mm/pgtable.c
27279@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
27280 list_del(&page->lru);
27281 }
27282
27283-#define UNSHARED_PTRS_PER_PGD \
27284- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27285+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27286+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27287
27288+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27289+{
27290+ while (count--)
27291+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27292+}
27293+#endif
27294+
27295+#ifdef CONFIG_PAX_PER_CPU_PGD
27296+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27297+{
27298+ while (count--)
27299+
27300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27301+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
27302+#else
27303+ *dst++ = *src++;
27304+#endif
27305+
27306+}
27307+#endif
27308+
27309+#ifdef CONFIG_X86_64
27310+#define pxd_t pud_t
27311+#define pyd_t pgd_t
27312+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27313+#define pxd_free(mm, pud) pud_free((mm), (pud))
27314+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27315+#define pyd_offset(mm, address) pgd_offset((mm), (address))
27316+#define PYD_SIZE PGDIR_SIZE
27317+#else
27318+#define pxd_t pmd_t
27319+#define pyd_t pud_t
27320+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27321+#define pxd_free(mm, pud) pmd_free((mm), (pud))
27322+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27323+#define pyd_offset(mm, address) pud_offset((mm), (address))
27324+#define PYD_SIZE PUD_SIZE
27325+#endif
27326+
27327+#ifdef CONFIG_PAX_PER_CPU_PGD
27328+static inline void pgd_ctor(pgd_t *pgd) {}
27329+static inline void pgd_dtor(pgd_t *pgd) {}
27330+#else
27331 static void pgd_ctor(pgd_t *pgd)
27332 {
27333 /* If the pgd points to a shared pagetable level (either the
27334@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
27335 pgd_list_del(pgd);
27336 spin_unlock_irqrestore(&pgd_lock, flags);
27337 }
27338+#endif
27339
27340 /*
27341 * List of all pgd's needed for non-PAE so it can invalidate entries
27342@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
27343 * -- wli
27344 */
27345
27346-#ifdef CONFIG_X86_PAE
27347+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27348 /*
27349 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27350 * updating the top-level pagetable entries to guarantee the
27351@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
27352 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27353 * and initialize the kernel pmds here.
27354 */
27355-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27356+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27357
27358 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27359 {
27360@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27361 */
27362 flush_tlb_mm(mm);
27363 }
27364+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27365+#define PREALLOCATED_PXDS USER_PGD_PTRS
27366 #else /* !CONFIG_X86_PAE */
27367
27368 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27369-#define PREALLOCATED_PMDS 0
27370+#define PREALLOCATED_PXDS 0
27371
27372 #endif /* CONFIG_X86_PAE */
27373
27374-static void free_pmds(pmd_t *pmds[])
27375+static void free_pxds(pxd_t *pxds[])
27376 {
27377 int i;
27378
27379- for(i = 0; i < PREALLOCATED_PMDS; i++)
27380- if (pmds[i])
27381- free_page((unsigned long)pmds[i]);
27382+ for(i = 0; i < PREALLOCATED_PXDS; i++)
27383+ if (pxds[i])
27384+ free_page((unsigned long)pxds[i]);
27385 }
27386
27387-static int preallocate_pmds(pmd_t *pmds[])
27388+static int preallocate_pxds(pxd_t *pxds[])
27389 {
27390 int i;
27391 bool failed = false;
27392
27393- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27394- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27395- if (pmd == NULL)
27396+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27397+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27398+ if (pxd == NULL)
27399 failed = true;
27400- pmds[i] = pmd;
27401+ pxds[i] = pxd;
27402 }
27403
27404 if (failed) {
27405- free_pmds(pmds);
27406+ free_pxds(pxds);
27407 return -ENOMEM;
27408 }
27409
27410@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
27411 * preallocate which never got a corresponding vma will need to be
27412 * freed manually.
27413 */
27414-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27415+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27416 {
27417 int i;
27418
27419- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27420+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27421 pgd_t pgd = pgdp[i];
27422
27423 if (pgd_val(pgd) != 0) {
27424- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27425+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27426
27427- pgdp[i] = native_make_pgd(0);
27428+ set_pgd(pgdp + i, native_make_pgd(0));
27429
27430- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27431- pmd_free(mm, pmd);
27432+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27433+ pxd_free(mm, pxd);
27434 }
27435 }
27436 }
27437
27438-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27439+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27440 {
27441- pud_t *pud;
27442+ pyd_t *pyd;
27443 unsigned long addr;
27444 int i;
27445
27446- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27447+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27448 return;
27449
27450- pud = pud_offset(pgd, 0);
27451+#ifdef CONFIG_X86_64
27452+ pyd = pyd_offset(mm, 0L);
27453+#else
27454+ pyd = pyd_offset(pgd, 0L);
27455+#endif
27456
27457- for (addr = i = 0; i < PREALLOCATED_PMDS;
27458- i++, pud++, addr += PUD_SIZE) {
27459- pmd_t *pmd = pmds[i];
27460+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27461+ i++, pyd++, addr += PYD_SIZE) {
27462+ pxd_t *pxd = pxds[i];
27463
27464 if (i >= KERNEL_PGD_BOUNDARY)
27465- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27466- sizeof(pmd_t) * PTRS_PER_PMD);
27467+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27468+ sizeof(pxd_t) * PTRS_PER_PMD);
27469
27470- pud_populate(mm, pud, pmd);
27471+ pyd_populate(mm, pyd, pxd);
27472 }
27473 }
27474
27475 pgd_t *pgd_alloc(struct mm_struct *mm)
27476 {
27477 pgd_t *pgd;
27478- pmd_t *pmds[PREALLOCATED_PMDS];
27479+ pxd_t *pxds[PREALLOCATED_PXDS];
27480+
27481 unsigned long flags;
27482
27483 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27484@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27485
27486 mm->pgd = pgd;
27487
27488- if (preallocate_pmds(pmds) != 0)
27489+ if (preallocate_pxds(pxds) != 0)
27490 goto out_free_pgd;
27491
27492 if (paravirt_pgd_alloc(mm) != 0)
27493- goto out_free_pmds;
27494+ goto out_free_pxds;
27495
27496 /*
27497 * Make sure that pre-populating the pmds is atomic with
27498@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27499 spin_lock_irqsave(&pgd_lock, flags);
27500
27501 pgd_ctor(pgd);
27502- pgd_prepopulate_pmd(mm, pgd, pmds);
27503+ pgd_prepopulate_pxd(mm, pgd, pxds);
27504
27505 spin_unlock_irqrestore(&pgd_lock, flags);
27506
27507 return pgd;
27508
27509-out_free_pmds:
27510- free_pmds(pmds);
27511+out_free_pxds:
27512+ free_pxds(pxds);
27513 out_free_pgd:
27514 free_page((unsigned long)pgd);
27515 out:
27516@@ -287,7 +338,7 @@ out:
27517
27518 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27519 {
27520- pgd_mop_up_pmds(mm, pgd);
27521+ pgd_mop_up_pxds(mm, pgd);
27522 pgd_dtor(pgd);
27523 paravirt_pgd_free(mm, pgd);
27524 free_page((unsigned long)pgd);
27525diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27526index 46c8834..fcab43d 100644
27527--- a/arch/x86/mm/pgtable_32.c
27528+++ b/arch/x86/mm/pgtable_32.c
27529@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27530 return;
27531 }
27532 pte = pte_offset_kernel(pmd, vaddr);
27533+
27534+ pax_open_kernel();
27535 if (pte_val(pteval))
27536 set_pte_at(&init_mm, vaddr, pte, pteval);
27537 else
27538 pte_clear(&init_mm, vaddr, pte);
27539+ pax_close_kernel();
27540
27541 /*
27542 * It's enough to flush this one mapping.
27543diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27544index 513d8ed..978c161 100644
27545--- a/arch/x86/mm/setup_nx.c
27546+++ b/arch/x86/mm/setup_nx.c
27547@@ -4,11 +4,10 @@
27548
27549 #include <asm/pgtable.h>
27550
27551+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27552 int nx_enabled;
27553
27554-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27555-static int disable_nx __cpuinitdata;
27556-
27557+#ifndef CONFIG_PAX_PAGEEXEC
27558 /*
27559 * noexec = on|off
27560 *
27561@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
27562 if (!str)
27563 return -EINVAL;
27564 if (!strncmp(str, "on", 2)) {
27565- __supported_pte_mask |= _PAGE_NX;
27566- disable_nx = 0;
27567+ nx_enabled = 1;
27568 } else if (!strncmp(str, "off", 3)) {
27569- disable_nx = 1;
27570- __supported_pte_mask &= ~_PAGE_NX;
27571+ nx_enabled = 0;
27572 }
27573 return 0;
27574 }
27575 early_param("noexec", noexec_setup);
27576 #endif
27577+#endif
27578
27579 #ifdef CONFIG_X86_PAE
27580 void __init set_nx(void)
27581 {
27582- unsigned int v[4], l, h;
27583+ if (!nx_enabled && cpu_has_nx) {
27584+ unsigned l, h;
27585
27586- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
27587- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
27588-
27589- if ((v[3] & (1 << 20)) && !disable_nx) {
27590- rdmsr(MSR_EFER, l, h);
27591- l |= EFER_NX;
27592- wrmsr(MSR_EFER, l, h);
27593- nx_enabled = 1;
27594- __supported_pte_mask |= _PAGE_NX;
27595- }
27596+ __supported_pte_mask &= ~_PAGE_NX;
27597+ rdmsr(MSR_EFER, l, h);
27598+ l &= ~EFER_NX;
27599+ wrmsr(MSR_EFER, l, h);
27600 }
27601 }
27602 #else
27603@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
27604 unsigned long efer;
27605
27606 rdmsrl(MSR_EFER, efer);
27607- if (!(efer & EFER_NX) || disable_nx)
27608+ if (!(efer & EFER_NX) || !nx_enabled)
27609 __supported_pte_mask &= ~_PAGE_NX;
27610 }
27611 #endif
27612diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27613index 36fe08e..b123d3a 100644
27614--- a/arch/x86/mm/tlb.c
27615+++ b/arch/x86/mm/tlb.c
27616@@ -61,7 +61,11 @@ void leave_mm(int cpu)
27617 BUG();
27618 cpumask_clear_cpu(cpu,
27619 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
27620+
27621+#ifndef CONFIG_PAX_PER_CPU_PGD
27622 load_cr3(swapper_pg_dir);
27623+#endif
27624+
27625 }
27626 EXPORT_SYMBOL_GPL(leave_mm);
27627
27628diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27629index 829edf0..672adb3 100644
27630--- a/arch/x86/oprofile/backtrace.c
27631+++ b/arch/x86/oprofile/backtrace.c
27632@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27633 {
27634 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
27635
27636- if (!user_mode_vm(regs)) {
27637+ if (!user_mode(regs)) {
27638 unsigned long stack = kernel_stack_pointer(regs);
27639 if (depth)
27640 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27641diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
27642index e6a160a..36deff6 100644
27643--- a/arch/x86/oprofile/op_model_p4.c
27644+++ b/arch/x86/oprofile/op_model_p4.c
27645@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
27646 #endif
27647 }
27648
27649-static int inline addr_increment(void)
27650+static inline int addr_increment(void)
27651 {
27652 #ifdef CONFIG_SMP
27653 return smp_num_siblings == 2 ? 2 : 1;
27654diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
27655index 1331fcf..03901b2 100644
27656--- a/arch/x86/pci/common.c
27657+++ b/arch/x86/pci/common.c
27658@@ -31,8 +31,8 @@ int noioapicreroute = 1;
27659 int pcibios_last_bus = -1;
27660 unsigned long pirq_table_addr;
27661 struct pci_bus *pci_root_bus;
27662-struct pci_raw_ops *raw_pci_ops;
27663-struct pci_raw_ops *raw_pci_ext_ops;
27664+const struct pci_raw_ops *raw_pci_ops;
27665+const struct pci_raw_ops *raw_pci_ext_ops;
27666
27667 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
27668 int reg, int len, u32 *val)
27669diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
27670index 347d882..4baf6b6 100644
27671--- a/arch/x86/pci/direct.c
27672+++ b/arch/x86/pci/direct.c
27673@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
27674
27675 #undef PCI_CONF1_ADDRESS
27676
27677-struct pci_raw_ops pci_direct_conf1 = {
27678+const struct pci_raw_ops pci_direct_conf1 = {
27679 .read = pci_conf1_read,
27680 .write = pci_conf1_write,
27681 };
27682@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
27683
27684 #undef PCI_CONF2_ADDRESS
27685
27686-struct pci_raw_ops pci_direct_conf2 = {
27687+const struct pci_raw_ops pci_direct_conf2 = {
27688 .read = pci_conf2_read,
27689 .write = pci_conf2_write,
27690 };
27691@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
27692 * This should be close to trivial, but it isn't, because there are buggy
27693 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
27694 */
27695-static int __init pci_sanity_check(struct pci_raw_ops *o)
27696+static int __init pci_sanity_check(const struct pci_raw_ops *o)
27697 {
27698 u32 x = 0;
27699 int year, devfn;
27700diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
27701index f10a7e9..0425342 100644
27702--- a/arch/x86/pci/mmconfig_32.c
27703+++ b/arch/x86/pci/mmconfig_32.c
27704@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27705 return 0;
27706 }
27707
27708-static struct pci_raw_ops pci_mmcfg = {
27709+static const struct pci_raw_ops pci_mmcfg = {
27710 .read = pci_mmcfg_read,
27711 .write = pci_mmcfg_write,
27712 };
27713diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
27714index 94349f8..41600a7 100644
27715--- a/arch/x86/pci/mmconfig_64.c
27716+++ b/arch/x86/pci/mmconfig_64.c
27717@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27718 return 0;
27719 }
27720
27721-static struct pci_raw_ops pci_mmcfg = {
27722+static const struct pci_raw_ops pci_mmcfg = {
27723 .read = pci_mmcfg_read,
27724 .write = pci_mmcfg_write,
27725 };
27726diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
27727index 8eb295e..86bd657 100644
27728--- a/arch/x86/pci/numaq_32.c
27729+++ b/arch/x86/pci/numaq_32.c
27730@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
27731
27732 #undef PCI_CONF1_MQ_ADDRESS
27733
27734-static struct pci_raw_ops pci_direct_conf1_mq = {
27735+static const struct pci_raw_ops pci_direct_conf1_mq = {
27736 .read = pci_conf1_mq_read,
27737 .write = pci_conf1_mq_write
27738 };
27739diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
27740index b889d82..5a58a0a 100644
27741--- a/arch/x86/pci/olpc.c
27742+++ b/arch/x86/pci/olpc.c
27743@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
27744 return 0;
27745 }
27746
27747-static struct pci_raw_ops pci_olpc_conf = {
27748+static const struct pci_raw_ops pci_olpc_conf = {
27749 .read = pci_olpc_read,
27750 .write = pci_olpc_write,
27751 };
27752diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27753index 1c975cc..b8e16c2 100644
27754--- a/arch/x86/pci/pcbios.c
27755+++ b/arch/x86/pci/pcbios.c
27756@@ -56,50 +56,93 @@ union bios32 {
27757 static struct {
27758 unsigned long address;
27759 unsigned short segment;
27760-} bios32_indirect = { 0, __KERNEL_CS };
27761+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27762
27763 /*
27764 * Returns the entry point for the given service, NULL on error
27765 */
27766
27767-static unsigned long bios32_service(unsigned long service)
27768+static unsigned long __devinit bios32_service(unsigned long service)
27769 {
27770 unsigned char return_code; /* %al */
27771 unsigned long address; /* %ebx */
27772 unsigned long length; /* %ecx */
27773 unsigned long entry; /* %edx */
27774 unsigned long flags;
27775+ struct desc_struct d, *gdt;
27776
27777 local_irq_save(flags);
27778- __asm__("lcall *(%%edi); cld"
27779+
27780+ gdt = get_cpu_gdt_table(smp_processor_id());
27781+
27782+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27783+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27784+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27785+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27786+
27787+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27788 : "=a" (return_code),
27789 "=b" (address),
27790 "=c" (length),
27791 "=d" (entry)
27792 : "0" (service),
27793 "1" (0),
27794- "D" (&bios32_indirect));
27795+ "D" (&bios32_indirect),
27796+ "r"(__PCIBIOS_DS)
27797+ : "memory");
27798+
27799+ pax_open_kernel();
27800+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27801+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27802+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27803+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27804+ pax_close_kernel();
27805+
27806 local_irq_restore(flags);
27807
27808 switch (return_code) {
27809- case 0:
27810- return address + entry;
27811- case 0x80: /* Not present */
27812- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27813- return 0;
27814- default: /* Shouldn't happen */
27815- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27816- service, return_code);
27817+ case 0: {
27818+ int cpu;
27819+ unsigned char flags;
27820+
27821+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27822+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27823+ printk(KERN_WARNING "bios32_service: not valid\n");
27824 return 0;
27825+ }
27826+ address = address + PAGE_OFFSET;
27827+ length += 16UL; /* some BIOSs underreport this... */
27828+ flags = 4;
27829+ if (length >= 64*1024*1024) {
27830+ length >>= PAGE_SHIFT;
27831+ flags |= 8;
27832+ }
27833+
27834+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27835+ gdt = get_cpu_gdt_table(cpu);
27836+ pack_descriptor(&d, address, length, 0x9b, flags);
27837+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27838+ pack_descriptor(&d, address, length, 0x93, flags);
27839+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27840+ }
27841+ return entry;
27842+ }
27843+ case 0x80: /* Not present */
27844+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27845+ return 0;
27846+ default: /* Shouldn't happen */
27847+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27848+ service, return_code);
27849+ return 0;
27850 }
27851 }
27852
27853 static struct {
27854 unsigned long address;
27855 unsigned short segment;
27856-} pci_indirect = { 0, __KERNEL_CS };
27857+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27858
27859-static int pci_bios_present;
27860+static int pci_bios_present __read_only;
27861
27862 static int __devinit check_pcibios(void)
27863 {
27864@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
27865 unsigned long flags, pcibios_entry;
27866
27867 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27868- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27869+ pci_indirect.address = pcibios_entry;
27870
27871 local_irq_save(flags);
27872- __asm__(
27873- "lcall *(%%edi); cld\n\t"
27874+ __asm__("movw %w6, %%ds\n\t"
27875+ "lcall *%%ss:(%%edi); cld\n\t"
27876+ "push %%ss\n\t"
27877+ "pop %%ds\n\t"
27878 "jc 1f\n\t"
27879 "xor %%ah, %%ah\n"
27880 "1:"
27881@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
27882 "=b" (ebx),
27883 "=c" (ecx)
27884 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27885- "D" (&pci_indirect)
27886+ "D" (&pci_indirect),
27887+ "r" (__PCIBIOS_DS)
27888 : "memory");
27889 local_irq_restore(flags);
27890
27891@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27892
27893 switch (len) {
27894 case 1:
27895- __asm__("lcall *(%%esi); cld\n\t"
27896+ __asm__("movw %w6, %%ds\n\t"
27897+ "lcall *%%ss:(%%esi); cld\n\t"
27898+ "push %%ss\n\t"
27899+ "pop %%ds\n\t"
27900 "jc 1f\n\t"
27901 "xor %%ah, %%ah\n"
27902 "1:"
27903@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27904 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27905 "b" (bx),
27906 "D" ((long)reg),
27907- "S" (&pci_indirect));
27908+ "S" (&pci_indirect),
27909+ "r" (__PCIBIOS_DS));
27910 /*
27911 * Zero-extend the result beyond 8 bits, do not trust the
27912 * BIOS having done it:
27913@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27914 *value &= 0xff;
27915 break;
27916 case 2:
27917- __asm__("lcall *(%%esi); cld\n\t"
27918+ __asm__("movw %w6, %%ds\n\t"
27919+ "lcall *%%ss:(%%esi); cld\n\t"
27920+ "push %%ss\n\t"
27921+ "pop %%ds\n\t"
27922 "jc 1f\n\t"
27923 "xor %%ah, %%ah\n"
27924 "1:"
27925@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27926 : "1" (PCIBIOS_READ_CONFIG_WORD),
27927 "b" (bx),
27928 "D" ((long)reg),
27929- "S" (&pci_indirect));
27930+ "S" (&pci_indirect),
27931+ "r" (__PCIBIOS_DS));
27932 /*
27933 * Zero-extend the result beyond 16 bits, do not trust the
27934 * BIOS having done it:
27935@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27936 *value &= 0xffff;
27937 break;
27938 case 4:
27939- __asm__("lcall *(%%esi); cld\n\t"
27940+ __asm__("movw %w6, %%ds\n\t"
27941+ "lcall *%%ss:(%%esi); cld\n\t"
27942+ "push %%ss\n\t"
27943+ "pop %%ds\n\t"
27944 "jc 1f\n\t"
27945 "xor %%ah, %%ah\n"
27946 "1:"
27947@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27948 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27949 "b" (bx),
27950 "D" ((long)reg),
27951- "S" (&pci_indirect));
27952+ "S" (&pci_indirect),
27953+ "r" (__PCIBIOS_DS));
27954 break;
27955 }
27956
27957@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27958
27959 switch (len) {
27960 case 1:
27961- __asm__("lcall *(%%esi); cld\n\t"
27962+ __asm__("movw %w6, %%ds\n\t"
27963+ "lcall *%%ss:(%%esi); cld\n\t"
27964+ "push %%ss\n\t"
27965+ "pop %%ds\n\t"
27966 "jc 1f\n\t"
27967 "xor %%ah, %%ah\n"
27968 "1:"
27969@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27970 "c" (value),
27971 "b" (bx),
27972 "D" ((long)reg),
27973- "S" (&pci_indirect));
27974+ "S" (&pci_indirect),
27975+ "r" (__PCIBIOS_DS));
27976 break;
27977 case 2:
27978- __asm__("lcall *(%%esi); cld\n\t"
27979+ __asm__("movw %w6, %%ds\n\t"
27980+ "lcall *%%ss:(%%esi); cld\n\t"
27981+ "push %%ss\n\t"
27982+ "pop %%ds\n\t"
27983 "jc 1f\n\t"
27984 "xor %%ah, %%ah\n"
27985 "1:"
27986@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27987 "c" (value),
27988 "b" (bx),
27989 "D" ((long)reg),
27990- "S" (&pci_indirect));
27991+ "S" (&pci_indirect),
27992+ "r" (__PCIBIOS_DS));
27993 break;
27994 case 4:
27995- __asm__("lcall *(%%esi); cld\n\t"
27996+ __asm__("movw %w6, %%ds\n\t"
27997+ "lcall *%%ss:(%%esi); cld\n\t"
27998+ "push %%ss\n\t"
27999+ "pop %%ds\n\t"
28000 "jc 1f\n\t"
28001 "xor %%ah, %%ah\n"
28002 "1:"
28003@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28004 "c" (value),
28005 "b" (bx),
28006 "D" ((long)reg),
28007- "S" (&pci_indirect));
28008+ "S" (&pci_indirect),
28009+ "r" (__PCIBIOS_DS));
28010 break;
28011 }
28012
28013@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28014 * Function table for BIOS32 access
28015 */
28016
28017-static struct pci_raw_ops pci_bios_access = {
28018+static const struct pci_raw_ops pci_bios_access = {
28019 .read = pci_bios_read,
28020 .write = pci_bios_write
28021 };
28022@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
28023 * Try to find PCI BIOS.
28024 */
28025
28026-static struct pci_raw_ops * __devinit pci_find_bios(void)
28027+static const struct pci_raw_ops * __devinit pci_find_bios(void)
28028 {
28029 union bios32 *check;
28030 unsigned char sum;
28031@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28032
28033 DBG("PCI: Fetching IRQ routing table... ");
28034 __asm__("push %%es\n\t"
28035+ "movw %w8, %%ds\n\t"
28036 "push %%ds\n\t"
28037 "pop %%es\n\t"
28038- "lcall *(%%esi); cld\n\t"
28039+ "lcall *%%ss:(%%esi); cld\n\t"
28040 "pop %%es\n\t"
28041+ "push %%ss\n\t"
28042+ "pop %%ds\n"
28043 "jc 1f\n\t"
28044 "xor %%ah, %%ah\n"
28045 "1:"
28046@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28047 "1" (0),
28048 "D" ((long) &opt),
28049 "S" (&pci_indirect),
28050- "m" (opt)
28051+ "m" (opt),
28052+ "r" (__PCIBIOS_DS)
28053 : "memory");
28054 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
28055 if (ret & 0xff00)
28056@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28057 {
28058 int ret;
28059
28060- __asm__("lcall *(%%esi); cld\n\t"
28061+ __asm__("movw %w5, %%ds\n\t"
28062+ "lcall *%%ss:(%%esi); cld\n\t"
28063+ "push %%ss\n\t"
28064+ "pop %%ds\n"
28065 "jc 1f\n\t"
28066 "xor %%ah, %%ah\n"
28067 "1:"
28068@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28069 : "0" (PCIBIOS_SET_PCI_HW_INT),
28070 "b" ((dev->bus->number << 8) | dev->devfn),
28071 "c" ((irq << 8) | (pin + 10)),
28072- "S" (&pci_indirect));
28073+ "S" (&pci_indirect),
28074+ "r" (__PCIBIOS_DS));
28075 return !(ret & 0xff00);
28076 }
28077 EXPORT_SYMBOL(pcibios_set_irq_routing);
28078diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28079index fa0f651..9d8f3d9 100644
28080--- a/arch/x86/power/cpu.c
28081+++ b/arch/x86/power/cpu.c
28082@@ -129,7 +129,7 @@ static void do_fpu_end(void)
28083 static void fix_processor_context(void)
28084 {
28085 int cpu = smp_processor_id();
28086- struct tss_struct *t = &per_cpu(init_tss, cpu);
28087+ struct tss_struct *t = init_tss + cpu;
28088
28089 set_tss_desc(cpu, t); /*
28090 * This just modifies memory; should not be
28091@@ -139,7 +139,9 @@ static void fix_processor_context(void)
28092 */
28093
28094 #ifdef CONFIG_X86_64
28095+ pax_open_kernel();
28096 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28097+ pax_close_kernel();
28098
28099 syscall_init(); /* This sets MSR_*STAR and related */
28100 #endif
28101diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28102index dd78ef6..f9d928d 100644
28103--- a/arch/x86/vdso/Makefile
28104+++ b/arch/x86/vdso/Makefile
28105@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
28106 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
28107 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
28108
28109-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28110+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28111 GCOV_PROFILE := n
28112
28113 #
28114diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
28115index ee55754..0013b2e 100644
28116--- a/arch/x86/vdso/vclock_gettime.c
28117+++ b/arch/x86/vdso/vclock_gettime.c
28118@@ -22,24 +22,48 @@
28119 #include <asm/hpet.h>
28120 #include <asm/unistd.h>
28121 #include <asm/io.h>
28122+#include <asm/fixmap.h>
28123 #include "vextern.h"
28124
28125 #define gtod vdso_vsyscall_gtod_data
28126
28127+notrace noinline long __vdso_fallback_time(long *t)
28128+{
28129+ long secs;
28130+ asm volatile("syscall"
28131+ : "=a" (secs)
28132+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
28133+ return secs;
28134+}
28135+
28136 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
28137 {
28138 long ret;
28139 asm("syscall" : "=a" (ret) :
28140- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
28141+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
28142 return ret;
28143 }
28144
28145+notrace static inline cycle_t __vdso_vread_hpet(void)
28146+{
28147+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
28148+}
28149+
28150+notrace static inline cycle_t __vdso_vread_tsc(void)
28151+{
28152+ cycle_t ret = (cycle_t)vget_cycles();
28153+
28154+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
28155+}
28156+
28157 notrace static inline long vgetns(void)
28158 {
28159 long v;
28160- cycles_t (*vread)(void);
28161- vread = gtod->clock.vread;
28162- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
28163+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
28164+ v = __vdso_vread_tsc();
28165+ else
28166+ v = __vdso_vread_hpet();
28167+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
28168 return (v * gtod->clock.mult) >> gtod->clock.shift;
28169 }
28170
28171@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
28172
28173 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28174 {
28175- if (likely(gtod->sysctl_enabled))
28176+ if (likely(gtod->sysctl_enabled &&
28177+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28178+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28179 switch (clock) {
28180 case CLOCK_REALTIME:
28181 if (likely(gtod->clock.vread))
28182@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28183 int clock_gettime(clockid_t, struct timespec *)
28184 __attribute__((weak, alias("__vdso_clock_gettime")));
28185
28186+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
28187+{
28188+ long ret;
28189+ asm("syscall" : "=a" (ret) :
28190+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
28191+ return ret;
28192+}
28193+
28194 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28195 {
28196- long ret;
28197- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
28198+ if (likely(gtod->sysctl_enabled &&
28199+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28200+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28201+ {
28202 if (likely(tv != NULL)) {
28203 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
28204 offsetof(struct timespec, tv_nsec) ||
28205@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28206 }
28207 return 0;
28208 }
28209- asm("syscall" : "=a" (ret) :
28210- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
28211- return ret;
28212+ return __vdso_fallback_gettimeofday(tv, tz);
28213 }
28214 int gettimeofday(struct timeval *, struct timezone *)
28215 __attribute__((weak, alias("__vdso_gettimeofday")));
28216diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
28217index 4e5dd3b..00ba15e 100644
28218--- a/arch/x86/vdso/vdso.lds.S
28219+++ b/arch/x86/vdso/vdso.lds.S
28220@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
28221 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
28222 #include "vextern.h"
28223 #undef VEXTERN
28224+
28225+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
28226+VEXTERN(fallback_gettimeofday)
28227+VEXTERN(fallback_time)
28228+VEXTERN(getcpu)
28229+#undef VEXTERN
28230diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28231index 58bc00f..d53fb48 100644
28232--- a/arch/x86/vdso/vdso32-setup.c
28233+++ b/arch/x86/vdso/vdso32-setup.c
28234@@ -25,6 +25,7 @@
28235 #include <asm/tlbflush.h>
28236 #include <asm/vdso.h>
28237 #include <asm/proto.h>
28238+#include <asm/mman.h>
28239
28240 enum {
28241 VDSO_DISABLED = 0,
28242@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28243 void enable_sep_cpu(void)
28244 {
28245 int cpu = get_cpu();
28246- struct tss_struct *tss = &per_cpu(init_tss, cpu);
28247+ struct tss_struct *tss = init_tss + cpu;
28248
28249 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28250 put_cpu();
28251@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28252 gate_vma.vm_start = FIXADDR_USER_START;
28253 gate_vma.vm_end = FIXADDR_USER_END;
28254 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28255- gate_vma.vm_page_prot = __P101;
28256+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28257 /*
28258 * Make sure the vDSO gets into every core dump.
28259 * Dumping its contents makes post-mortem fully interpretable later
28260@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28261 if (compat)
28262 addr = VDSO_HIGH_BASE;
28263 else {
28264- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28265+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28266 if (IS_ERR_VALUE(addr)) {
28267 ret = addr;
28268 goto up_fail;
28269 }
28270 }
28271
28272- current->mm->context.vdso = (void *)addr;
28273+ current->mm->context.vdso = addr;
28274
28275 if (compat_uses_vma || !compat) {
28276 /*
28277@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28278 }
28279
28280 current_thread_info()->sysenter_return =
28281- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28282+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28283
28284 up_fail:
28285 if (ret)
28286- current->mm->context.vdso = NULL;
28287+ current->mm->context.vdso = 0;
28288
28289 up_write(&mm->mmap_sem);
28290
28291@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
28292
28293 const char *arch_vma_name(struct vm_area_struct *vma)
28294 {
28295- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28296+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28297 return "[vdso]";
28298+
28299+#ifdef CONFIG_PAX_SEGMEXEC
28300+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28301+ return "[vdso]";
28302+#endif
28303+
28304 return NULL;
28305 }
28306
28307@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
28308 struct mm_struct *mm = tsk->mm;
28309
28310 /* Check to see if this task was created in compat vdso mode */
28311- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
28312+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
28313 return &gate_vma;
28314 return NULL;
28315 }
28316diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
28317index 1683ba2..48d07f3 100644
28318--- a/arch/x86/vdso/vextern.h
28319+++ b/arch/x86/vdso/vextern.h
28320@@ -11,6 +11,5 @@
28321 put into vextern.h and be referenced as a pointer with vdso prefix.
28322 The main kernel later fills in the values. */
28323
28324-VEXTERN(jiffies)
28325 VEXTERN(vgetcpu_mode)
28326 VEXTERN(vsyscall_gtod_data)
28327diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
28328index 21e1aeb..2c0b3c4 100644
28329--- a/arch/x86/vdso/vma.c
28330+++ b/arch/x86/vdso/vma.c
28331@@ -17,8 +17,6 @@
28332 #include "vextern.h" /* Just for VMAGIC. */
28333 #undef VEXTERN
28334
28335-unsigned int __read_mostly vdso_enabled = 1;
28336-
28337 extern char vdso_start[], vdso_end[];
28338 extern unsigned short vdso_sync_cpuid;
28339
28340@@ -27,10 +25,8 @@ static unsigned vdso_size;
28341
28342 static inline void *var_ref(void *p, char *name)
28343 {
28344- if (*(void **)p != (void *)VMAGIC) {
28345- printk("VDSO: variable %s broken\n", name);
28346- vdso_enabled = 0;
28347- }
28348+ if (*(void **)p != (void *)VMAGIC)
28349+ panic("VDSO: variable %s broken\n", name);
28350 return p;
28351 }
28352
28353@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
28354 if (!vbase)
28355 goto oom;
28356
28357- if (memcmp(vbase, "\177ELF", 4)) {
28358- printk("VDSO: I'm broken; not ELF\n");
28359- vdso_enabled = 0;
28360- }
28361+ if (memcmp(vbase, ELFMAG, SELFMAG))
28362+ panic("VDSO: I'm broken; not ELF\n");
28363
28364 #define VEXTERN(x) \
28365 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
28366 #include "vextern.h"
28367 #undef VEXTERN
28368+ vunmap(vbase);
28369 return 0;
28370
28371 oom:
28372- printk("Cannot allocate vdso\n");
28373- vdso_enabled = 0;
28374- return -ENOMEM;
28375+ panic("Cannot allocate vdso\n");
28376 }
28377 __initcall(init_vdso_vars);
28378
28379@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
28380 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28381 {
28382 struct mm_struct *mm = current->mm;
28383- unsigned long addr;
28384+ unsigned long addr = 0;
28385 int ret;
28386
28387- if (!vdso_enabled)
28388- return 0;
28389-
28390 down_write(&mm->mmap_sem);
28391+
28392+#ifdef CONFIG_PAX_RANDMMAP
28393+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28394+#endif
28395+
28396 addr = vdso_addr(mm->start_stack, vdso_size);
28397 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
28398 if (IS_ERR_VALUE(addr)) {
28399@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28400 goto up_fail;
28401 }
28402
28403- current->mm->context.vdso = (void *)addr;
28404+ current->mm->context.vdso = addr;
28405
28406 ret = install_special_mapping(mm, addr, vdso_size,
28407 VM_READ|VM_EXEC|
28408@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28409 VM_ALWAYSDUMP,
28410 vdso_pages);
28411 if (ret) {
28412- current->mm->context.vdso = NULL;
28413+ current->mm->context.vdso = 0;
28414 goto up_fail;
28415 }
28416
28417@@ -132,10 +127,3 @@ up_fail:
28418 up_write(&mm->mmap_sem);
28419 return ret;
28420 }
28421-
28422-static __init int vdso_setup(char *s)
28423-{
28424- vdso_enabled = simple_strtoul(s, NULL, 0);
28425- return 0;
28426-}
28427-__setup("vdso=", vdso_setup);
28428diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
28429index 0087b00..eecb34f 100644
28430--- a/arch/x86/xen/enlighten.c
28431+++ b/arch/x86/xen/enlighten.c
28432@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
28433
28434 struct shared_info xen_dummy_shared_info;
28435
28436-void *xen_initial_gdt;
28437-
28438 /*
28439 * Point at some empty memory to start with. We map the real shared_info
28440 * page as soon as fixmap is up and running.
28441@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
28442
28443 preempt_disable();
28444
28445- start = __get_cpu_var(idt_desc).address;
28446+ start = (unsigned long)__get_cpu_var(idt_desc).address;
28447 end = start + __get_cpu_var(idt_desc).size + 1;
28448
28449 xen_mc_flush();
28450@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
28451 #endif
28452 };
28453
28454-static void xen_reboot(int reason)
28455+static __noreturn void xen_reboot(int reason)
28456 {
28457 struct sched_shutdown r = { .reason = reason };
28458
28459@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
28460 BUG();
28461 }
28462
28463-static void xen_restart(char *msg)
28464+static __noreturn void xen_restart(char *msg)
28465 {
28466 xen_reboot(SHUTDOWN_reboot);
28467 }
28468
28469-static void xen_emergency_restart(void)
28470+static __noreturn void xen_emergency_restart(void)
28471 {
28472 xen_reboot(SHUTDOWN_reboot);
28473 }
28474
28475-static void xen_machine_halt(void)
28476+static __noreturn void xen_machine_halt(void)
28477 {
28478 xen_reboot(SHUTDOWN_poweroff);
28479 }
28480@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
28481 */
28482 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
28483
28484-#ifdef CONFIG_X86_64
28485 /* Work out if we support NX */
28486- check_efer();
28487+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28488+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
28489+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
28490+ unsigned l, h;
28491+
28492+#ifdef CONFIG_X86_PAE
28493+ nx_enabled = 1;
28494+#endif
28495+ __supported_pte_mask |= _PAGE_NX;
28496+ rdmsr(MSR_EFER, l, h);
28497+ l |= EFER_NX;
28498+ wrmsr(MSR_EFER, l, h);
28499+ }
28500 #endif
28501
28502 xen_setup_features();
28503@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
28504
28505 machine_ops = xen_machine_ops;
28506
28507- /*
28508- * The only reliable way to retain the initial address of the
28509- * percpu gdt_page is to remember it here, so we can go and
28510- * mark it RW later, when the initial percpu area is freed.
28511- */
28512- xen_initial_gdt = &per_cpu(gdt_page, 0);
28513-
28514 xen_smp_init();
28515
28516 pgd = (pgd_t *)xen_start_info->pt_base;
28517diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
28518index 3f90a2c..2c2ad84 100644
28519--- a/arch/x86/xen/mmu.c
28520+++ b/arch/x86/xen/mmu.c
28521@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28522 convert_pfn_mfn(init_level4_pgt);
28523 convert_pfn_mfn(level3_ident_pgt);
28524 convert_pfn_mfn(level3_kernel_pgt);
28525+ convert_pfn_mfn(level3_vmalloc_start_pgt);
28526+ convert_pfn_mfn(level3_vmalloc_end_pgt);
28527+ convert_pfn_mfn(level3_vmemmap_pgt);
28528
28529 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
28530 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
28531@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28532 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
28533 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
28534 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
28535+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
28536+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
28537+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
28538 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
28539+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
28540 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
28541 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
28542
28543@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
28544 pv_mmu_ops.set_pud = xen_set_pud;
28545 #if PAGETABLE_LEVELS == 4
28546 pv_mmu_ops.set_pgd = xen_set_pgd;
28547+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
28548 #endif
28549
28550 /* This will work as long as patching hasn't happened yet
28551@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
28552 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
28553 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
28554 .set_pgd = xen_set_pgd_hyper,
28555+ .set_pgd_batched = xen_set_pgd_hyper,
28556
28557 .alloc_pud = xen_alloc_pmd_init,
28558 .release_pud = xen_release_pmd_init,
28559diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
28560index a96204a..fca9b8e 100644
28561--- a/arch/x86/xen/smp.c
28562+++ b/arch/x86/xen/smp.c
28563@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
28564 {
28565 BUG_ON(smp_processor_id() != 0);
28566 native_smp_prepare_boot_cpu();
28567-
28568- /* We've switched to the "real" per-cpu gdt, so make sure the
28569- old memory can be recycled */
28570- make_lowmem_page_readwrite(xen_initial_gdt);
28571-
28572 xen_setup_vcpu_info_placement();
28573 }
28574
28575@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
28576 gdt = get_cpu_gdt_table(cpu);
28577
28578 ctxt->flags = VGCF_IN_KERNEL;
28579- ctxt->user_regs.ds = __USER_DS;
28580- ctxt->user_regs.es = __USER_DS;
28581+ ctxt->user_regs.ds = __KERNEL_DS;
28582+ ctxt->user_regs.es = __KERNEL_DS;
28583 ctxt->user_regs.ss = __KERNEL_DS;
28584 #ifdef CONFIG_X86_32
28585 ctxt->user_regs.fs = __KERNEL_PERCPU;
28586- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
28587+ savesegment(gs, ctxt->user_regs.gs);
28588 #else
28589 ctxt->gs_base_kernel = per_cpu_offset(cpu);
28590 #endif
28591@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
28592 int rc;
28593
28594 per_cpu(current_task, cpu) = idle;
28595+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28596 #ifdef CONFIG_X86_32
28597 irq_ctx_init(cpu);
28598 #else
28599 clear_tsk_thread_flag(idle, TIF_FORK);
28600- per_cpu(kernel_stack, cpu) =
28601- (unsigned long)task_stack_page(idle) -
28602- KERNEL_STACK_OFFSET + THREAD_SIZE;
28603+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28604 #endif
28605 xen_setup_runstate_info(cpu);
28606 xen_setup_timer(cpu);
28607diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
28608index 9a95a9c..4f39e774 100644
28609--- a/arch/x86/xen/xen-asm_32.S
28610+++ b/arch/x86/xen/xen-asm_32.S
28611@@ -83,14 +83,14 @@ ENTRY(xen_iret)
28612 ESP_OFFSET=4 # bytes pushed onto stack
28613
28614 /*
28615- * Store vcpu_info pointer for easy access. Do it this way to
28616- * avoid having to reload %fs
28617+ * Store vcpu_info pointer for easy access.
28618 */
28619 #ifdef CONFIG_SMP
28620- GET_THREAD_INFO(%eax)
28621- movl TI_cpu(%eax), %eax
28622- movl __per_cpu_offset(,%eax,4), %eax
28623- mov per_cpu__xen_vcpu(%eax), %eax
28624+ push %fs
28625+ mov $(__KERNEL_PERCPU), %eax
28626+ mov %eax, %fs
28627+ mov PER_CPU_VAR(xen_vcpu), %eax
28628+ pop %fs
28629 #else
28630 movl per_cpu__xen_vcpu, %eax
28631 #endif
28632diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
28633index 1a5ff24..a187d40 100644
28634--- a/arch/x86/xen/xen-head.S
28635+++ b/arch/x86/xen/xen-head.S
28636@@ -19,6 +19,17 @@ ENTRY(startup_xen)
28637 #ifdef CONFIG_X86_32
28638 mov %esi,xen_start_info
28639 mov $init_thread_union+THREAD_SIZE,%esp
28640+#ifdef CONFIG_SMP
28641+ movl $cpu_gdt_table,%edi
28642+ movl $__per_cpu_load,%eax
28643+ movw %ax,__KERNEL_PERCPU + 2(%edi)
28644+ rorl $16,%eax
28645+ movb %al,__KERNEL_PERCPU + 4(%edi)
28646+ movb %ah,__KERNEL_PERCPU + 7(%edi)
28647+ movl $__per_cpu_end - 1,%eax
28648+ subl $__per_cpu_start,%eax
28649+ movw %ax,__KERNEL_PERCPU + 0(%edi)
28650+#endif
28651 #else
28652 mov %rsi,xen_start_info
28653 mov $init_thread_union+THREAD_SIZE,%rsp
28654diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
28655index f9153a3..51eab3d 100644
28656--- a/arch/x86/xen/xen-ops.h
28657+++ b/arch/x86/xen/xen-ops.h
28658@@ -10,8 +10,6 @@
28659 extern const char xen_hypervisor_callback[];
28660 extern const char xen_failsafe_callback[];
28661
28662-extern void *xen_initial_gdt;
28663-
28664 struct trap_info;
28665 void xen_copy_trap_info(struct trap_info *traps);
28666
28667diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
28668index 525bd3d..ef888b1 100644
28669--- a/arch/xtensa/variants/dc232b/include/variant/core.h
28670+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
28671@@ -119,9 +119,9 @@
28672 ----------------------------------------------------------------------*/
28673
28674 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
28675-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
28676 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
28677 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
28678+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28679
28680 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
28681 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
28682diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
28683index 2f33760..835e50a 100644
28684--- a/arch/xtensa/variants/fsf/include/variant/core.h
28685+++ b/arch/xtensa/variants/fsf/include/variant/core.h
28686@@ -11,6 +11,7 @@
28687 #ifndef _XTENSA_CORE_H
28688 #define _XTENSA_CORE_H
28689
28690+#include <linux/const.h>
28691
28692 /****************************************************************************
28693 Parameters Useful for Any Code, USER or PRIVILEGED
28694@@ -112,9 +113,9 @@
28695 ----------------------------------------------------------------------*/
28696
28697 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28698-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28699 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28700 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28701+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28702
28703 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
28704 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
28705diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
28706index af00795..2bb8105 100644
28707--- a/arch/xtensa/variants/s6000/include/variant/core.h
28708+++ b/arch/xtensa/variants/s6000/include/variant/core.h
28709@@ -11,6 +11,7 @@
28710 #ifndef _XTENSA_CORE_CONFIGURATION_H
28711 #define _XTENSA_CORE_CONFIGURATION_H
28712
28713+#include <linux/const.h>
28714
28715 /****************************************************************************
28716 Parameters Useful for Any Code, USER or PRIVILEGED
28717@@ -118,9 +119,9 @@
28718 ----------------------------------------------------------------------*/
28719
28720 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28721-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28722 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28723 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28724+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28725
28726 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
28727 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
28728diff --git a/block/blk-integrity.c b/block/blk-integrity.c
28729index 15c6308..96e83c2 100644
28730--- a/block/blk-integrity.c
28731+++ b/block/blk-integrity.c
28732@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
28733 NULL,
28734 };
28735
28736-static struct sysfs_ops integrity_ops = {
28737+static const struct sysfs_ops integrity_ops = {
28738 .show = &integrity_attr_show,
28739 .store = &integrity_attr_store,
28740 };
28741diff --git a/block/blk-ioc.c b/block/blk-ioc.c
28742index d4ed600..cbdabb0 100644
28743--- a/block/blk-ioc.c
28744+++ b/block/blk-ioc.c
28745@@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
28746 }
28747
28748 /* Called by the exitting task */
28749-void exit_io_context(void)
28750+void exit_io_context(struct task_struct *task)
28751 {
28752 struct io_context *ioc;
28753
28754- task_lock(current);
28755- ioc = current->io_context;
28756- current->io_context = NULL;
28757- task_unlock(current);
28758+ task_lock(task);
28759+ ioc = task->io_context;
28760+ task->io_context = NULL;
28761+ task_unlock(task);
28762
28763 if (atomic_dec_and_test(&ioc->nr_tasks)) {
28764 if (ioc->aic && ioc->aic->exit)
28765 ioc->aic->exit(ioc->aic);
28766 cfq_exit(ioc);
28767
28768- put_io_context(ioc);
28769 }
28770+ put_io_context(ioc);
28771 }
28772
28773 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
28774diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
28775index ca56420..f2fc409 100644
28776--- a/block/blk-iopoll.c
28777+++ b/block/blk-iopoll.c
28778@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
28779 }
28780 EXPORT_SYMBOL(blk_iopoll_complete);
28781
28782-static void blk_iopoll_softirq(struct softirq_action *h)
28783+static void blk_iopoll_softirq(void)
28784 {
28785 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
28786 int rearm = 0, budget = blk_iopoll_budget;
28787diff --git a/block/blk-map.c b/block/blk-map.c
28788index 30a7e51..0aeec6a 100644
28789--- a/block/blk-map.c
28790+++ b/block/blk-map.c
28791@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
28792 * direct dma. else, set up kernel bounce buffers
28793 */
28794 uaddr = (unsigned long) ubuf;
28795- if (blk_rq_aligned(q, ubuf, len) && !map_data)
28796+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
28797 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
28798 else
28799 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
28800@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
28801 for (i = 0; i < iov_count; i++) {
28802 unsigned long uaddr = (unsigned long)iov[i].iov_base;
28803
28804+ if (!iov[i].iov_len)
28805+ return -EINVAL;
28806+
28807 if (uaddr & queue_dma_alignment(q)) {
28808 unaligned = 1;
28809 break;
28810 }
28811- if (!iov[i].iov_len)
28812- return -EINVAL;
28813 }
28814
28815 if (unaligned || (q->dma_pad_mask & len) || map_data)
28816@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
28817 if (!len || !kbuf)
28818 return -EINVAL;
28819
28820- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
28821+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
28822 if (do_copy)
28823 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
28824 else
28825diff --git a/block/blk-softirq.c b/block/blk-softirq.c
28826index ee9c216..58d410a 100644
28827--- a/block/blk-softirq.c
28828+++ b/block/blk-softirq.c
28829@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
28830 * Softirq action handler - move entries to local list and loop over them
28831 * while passing them to the queue registered handler.
28832 */
28833-static void blk_done_softirq(struct softirq_action *h)
28834+static void blk_done_softirq(void)
28835 {
28836 struct list_head *cpu_list, local_list;
28837
28838diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
28839index bb9c5ea..5330d48 100644
28840--- a/block/blk-sysfs.c
28841+++ b/block/blk-sysfs.c
28842@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
28843 kmem_cache_free(blk_requestq_cachep, q);
28844 }
28845
28846-static struct sysfs_ops queue_sysfs_ops = {
28847+static const struct sysfs_ops queue_sysfs_ops = {
28848 .show = queue_attr_show,
28849 .store = queue_attr_store,
28850 };
28851diff --git a/block/bsg.c b/block/bsg.c
28852index 7154a7a..08ac2f0 100644
28853--- a/block/bsg.c
28854+++ b/block/bsg.c
28855@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28856 struct sg_io_v4 *hdr, struct bsg_device *bd,
28857 fmode_t has_write_perm)
28858 {
28859+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28860+ unsigned char *cmdptr;
28861+
28862 if (hdr->request_len > BLK_MAX_CDB) {
28863 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28864 if (!rq->cmd)
28865 return -ENOMEM;
28866- }
28867+ cmdptr = rq->cmd;
28868+ } else
28869+ cmdptr = tmpcmd;
28870
28871- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
28872+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28873 hdr->request_len))
28874 return -EFAULT;
28875
28876+ if (cmdptr != rq->cmd)
28877+ memcpy(rq->cmd, cmdptr, hdr->request_len);
28878+
28879 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28880 if (blk_verify_command(rq->cmd, has_write_perm))
28881 return -EPERM;
28882@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28883 rq->next_rq = next_rq;
28884 next_rq->cmd_type = rq->cmd_type;
28885
28886- dxferp = (void*)(unsigned long)hdr->din_xferp;
28887+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28888 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
28889 hdr->din_xfer_len, GFP_KERNEL);
28890 if (ret)
28891@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28892
28893 if (hdr->dout_xfer_len) {
28894 dxfer_len = hdr->dout_xfer_len;
28895- dxferp = (void*)(unsigned long)hdr->dout_xferp;
28896+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
28897 } else if (hdr->din_xfer_len) {
28898 dxfer_len = hdr->din_xfer_len;
28899- dxferp = (void*)(unsigned long)hdr->din_xferp;
28900+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28901 } else
28902 dxfer_len = 0;
28903
28904@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
28905 int len = min_t(unsigned int, hdr->max_response_len,
28906 rq->sense_len);
28907
28908- ret = copy_to_user((void*)(unsigned long)hdr->response,
28909+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
28910 rq->sense, len);
28911 if (!ret)
28912 hdr->response_len = len;
28913diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28914index 9bd086c..ca1fc22 100644
28915--- a/block/compat_ioctl.c
28916+++ b/block/compat_ioctl.c
28917@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28918 err |= __get_user(f->spec1, &uf->spec1);
28919 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28920 err |= __get_user(name, &uf->name);
28921- f->name = compat_ptr(name);
28922+ f->name = (void __force_kernel *)compat_ptr(name);
28923 if (err) {
28924 err = -EFAULT;
28925 goto out;
28926diff --git a/block/elevator.c b/block/elevator.c
28927index a847046..75a1746 100644
28928--- a/block/elevator.c
28929+++ b/block/elevator.c
28930@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
28931 return error;
28932 }
28933
28934-static struct sysfs_ops elv_sysfs_ops = {
28935+static const struct sysfs_ops elv_sysfs_ops = {
28936 .show = elv_attr_show,
28937 .store = elv_attr_store,
28938 };
28939diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
28940index 2be0a97..bded3fd 100644
28941--- a/block/scsi_ioctl.c
28942+++ b/block/scsi_ioctl.c
28943@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
28944 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
28945 struct sg_io_hdr *hdr, fmode_t mode)
28946 {
28947- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
28948+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28949+ unsigned char *cmdptr;
28950+
28951+ if (rq->cmd != rq->__cmd)
28952+ cmdptr = rq->cmd;
28953+ else
28954+ cmdptr = tmpcmd;
28955+
28956+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
28957 return -EFAULT;
28958+
28959+ if (cmdptr != rq->cmd)
28960+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
28961+
28962 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
28963 return -EPERM;
28964
28965@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28966 int err;
28967 unsigned int in_len, out_len, bytes, opcode, cmdlen;
28968 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
28969+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28970+ unsigned char *cmdptr;
28971
28972 if (!sic)
28973 return -EINVAL;
28974@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28975 */
28976 err = -EFAULT;
28977 rq->cmd_len = cmdlen;
28978- if (copy_from_user(rq->cmd, sic->data, cmdlen))
28979+
28980+ if (rq->cmd != rq->__cmd)
28981+ cmdptr = rq->cmd;
28982+ else
28983+ cmdptr = tmpcmd;
28984+
28985+ if (copy_from_user(cmdptr, sic->data, cmdlen))
28986 goto error;
28987
28988+ if (rq->cmd != cmdptr)
28989+ memcpy(rq->cmd, cmdptr, cmdlen);
28990+
28991 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
28992 goto error;
28993
28994diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
28995index f6f0833..514d986 100644
28996--- a/crypto/ablkcipher.c
28997+++ b/crypto/ablkcipher.c
28998@@ -29,6 +29,8 @@
28999 static const char *skcipher_default_geniv __read_mostly;
29000
29001 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29002+ unsigned int keylen) __size_overflow(3);
29003+static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29004 unsigned int keylen)
29005 {
29006 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
29007@@ -51,6 +53,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29008 }
29009
29010 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
29011+ unsigned int keylen) __size_overflow(3);
29012+static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
29013 unsigned int keylen)
29014 {
29015 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
29016diff --git a/crypto/aead.c b/crypto/aead.c
29017index 0a55da7..9256a04 100644
29018--- a/crypto/aead.c
29019+++ b/crypto/aead.c
29020@@ -25,6 +25,8 @@
29021 #include "internal.h"
29022
29023 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29024+ unsigned int keylen) __size_overflow(3);
29025+static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29026 unsigned int keylen)
29027 {
29028 struct aead_alg *aead = crypto_aead_alg(tfm);
29029@@ -46,6 +48,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29030 return ret;
29031 }
29032
29033+static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29034 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
29035 {
29036 struct aead_alg *aead = crypto_aead_alg(tfm);
29037diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
29038index 90d26c9..3db7c03 100644
29039--- a/crypto/blkcipher.c
29040+++ b/crypto/blkcipher.c
29041@@ -357,6 +357,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
29042 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
29043
29044 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29045+ unsigned int keylen) __size_overflow(3);
29046+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29047 unsigned int keylen)
29048 {
29049 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
29050@@ -378,6 +380,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29051 return ret;
29052 }
29053
29054+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29055 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29056 {
29057 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
29058diff --git a/crypto/cipher.c b/crypto/cipher.c
29059index 9a1a731..41454c2 100644
29060--- a/crypto/cipher.c
29061+++ b/crypto/cipher.c
29062@@ -21,6 +21,8 @@
29063 #include "internal.h"
29064
29065 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29066+ unsigned int keylen) __size_overflow(3);
29067+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29068 unsigned int keylen)
29069 {
29070 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29071@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29072
29073 }
29074
29075+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29076 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29077 {
29078 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29079diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29080index 3533582..f143117 100644
29081--- a/crypto/cryptd.c
29082+++ b/crypto/cryptd.c
29083@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
29084
29085 struct cryptd_blkcipher_request_ctx {
29086 crypto_completion_t complete;
29087-};
29088+} __no_const;
29089
29090 struct cryptd_hash_ctx {
29091 struct crypto_shash *child;
29092diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
29093index a90d260..7a9765e 100644
29094--- a/crypto/gf128mul.c
29095+++ b/crypto/gf128mul.c
29096@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
29097 for (i = 0; i < 7; ++i)
29098 gf128mul_x_lle(&p[i + 1], &p[i]);
29099
29100- memset(r, 0, sizeof(r));
29101+ memset(r, 0, sizeof(*r));
29102 for (i = 0;;) {
29103 u8 ch = ((u8 *)b)[15 - i];
29104
29105@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
29106 for (i = 0; i < 7; ++i)
29107 gf128mul_x_bbe(&p[i + 1], &p[i]);
29108
29109- memset(r, 0, sizeof(r));
29110+ memset(r, 0, sizeof(*r));
29111 for (i = 0;;) {
29112 u8 ch = ((u8 *)b)[i];
29113
29114diff --git a/crypto/serpent.c b/crypto/serpent.c
29115index b651a55..023297d 100644
29116--- a/crypto/serpent.c
29117+++ b/crypto/serpent.c
29118@@ -21,6 +21,7 @@
29119 #include <asm/byteorder.h>
29120 #include <linux/crypto.h>
29121 #include <linux/types.h>
29122+#include <linux/sched.h>
29123
29124 /* Key is padded to the maximum of 256 bits before round key generation.
29125 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
29126@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
29127 u32 r0,r1,r2,r3,r4;
29128 int i;
29129
29130+ pax_track_stack();
29131+
29132 /* Copy key, add padding */
29133
29134 for (i = 0; i < keylen; ++i)
29135diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
29136index 0d2cdb8..d8de48d 100644
29137--- a/drivers/acpi/acpi_pad.c
29138+++ b/drivers/acpi/acpi_pad.c
29139@@ -30,7 +30,7 @@
29140 #include <acpi/acpi_bus.h>
29141 #include <acpi/acpi_drivers.h>
29142
29143-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
29144+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
29145 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
29146 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
29147 static DEFINE_MUTEX(isolated_cpus_lock);
29148diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
29149index 3f4602b..1978af1 100644
29150--- a/drivers/acpi/battery.c
29151+++ b/drivers/acpi/battery.c
29152@@ -678,6 +678,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
29153
29154 static ssize_t acpi_battery_write_alarm(struct file *file,
29155 const char __user * buffer,
29156+ size_t count, loff_t * ppos) __size_overflow(3);
29157+static ssize_t acpi_battery_write_alarm(struct file *file,
29158+ const char __user * buffer,
29159 size_t count, loff_t * ppos)
29160 {
29161 int result = 0;
29162@@ -763,7 +766,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
29163 }
29164
29165 static struct battery_file {
29166- struct file_operations ops;
29167+ const struct file_operations ops;
29168 mode_t mode;
29169 const char *name;
29170 } acpi_battery_file[] = {
29171diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
29172index 7338b6a..82f0257 100644
29173--- a/drivers/acpi/dock.c
29174+++ b/drivers/acpi/dock.c
29175@@ -77,7 +77,7 @@ struct dock_dependent_device {
29176 struct list_head list;
29177 struct list_head hotplug_list;
29178 acpi_handle handle;
29179- struct acpi_dock_ops *ops;
29180+ const struct acpi_dock_ops *ops;
29181 void *context;
29182 };
29183
29184@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
29185 * the dock driver after _DCK is executed.
29186 */
29187 int
29188-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
29189+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
29190 void *context)
29191 {
29192 struct dock_dependent_device *dd;
29193diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
29194index 7c1c59e..2993595 100644
29195--- a/drivers/acpi/osl.c
29196+++ b/drivers/acpi/osl.c
29197@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
29198 void __iomem *virt_addr;
29199
29200 virt_addr = ioremap(phys_addr, width);
29201+ if (!virt_addr)
29202+ return AE_NO_MEMORY;
29203 if (!value)
29204 value = &dummy;
29205
29206@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
29207 void __iomem *virt_addr;
29208
29209 virt_addr = ioremap(phys_addr, width);
29210+ if (!virt_addr)
29211+ return AE_NO_MEMORY;
29212
29213 switch (width) {
29214 case 8:
29215diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
29216index c216062..eec10d2 100644
29217--- a/drivers/acpi/power_meter.c
29218+++ b/drivers/acpi/power_meter.c
29219@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29220 return res;
29221
29222 temp /= 1000;
29223- if (temp < 0)
29224- return -EINVAL;
29225
29226 mutex_lock(&resource->lock);
29227 resource->trip[attr->index - 7] = temp;
29228diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29229index d0d25e2..961643d 100644
29230--- a/drivers/acpi/proc.c
29231+++ b/drivers/acpi/proc.c
29232@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
29233 size_t count, loff_t * ppos)
29234 {
29235 struct list_head *node, *next;
29236- char strbuf[5];
29237- char str[5] = "";
29238- unsigned int len = count;
29239+ char strbuf[5] = {0};
29240 struct acpi_device *found_dev = NULL;
29241
29242- if (len > 4)
29243- len = 4;
29244- if (len < 0)
29245- return -EFAULT;
29246+ if (count > 4)
29247+ count = 4;
29248
29249- if (copy_from_user(strbuf, buffer, len))
29250+ if (copy_from_user(strbuf, buffer, count))
29251 return -EFAULT;
29252- strbuf[len] = '\0';
29253- sscanf(strbuf, "%s", str);
29254+ strbuf[count] = '\0';
29255
29256 mutex_lock(&acpi_device_lock);
29257 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29258@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
29259 if (!dev->wakeup.flags.valid)
29260 continue;
29261
29262- if (!strncmp(dev->pnp.bus_id, str, 4)) {
29263+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29264 dev->wakeup.state.enabled =
29265 dev->wakeup.state.enabled ? 0 : 1;
29266 found_dev = dev;
29267diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
29268index 7102474..de8ad22 100644
29269--- a/drivers/acpi/processor_core.c
29270+++ b/drivers/acpi/processor_core.c
29271@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29272 return 0;
29273 }
29274
29275- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29276+ BUG_ON(pr->id >= nr_cpu_ids);
29277
29278 /*
29279 * Buggy BIOS check
29280diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
29281index 52b9db8..a519aab 100644
29282--- a/drivers/acpi/sbs.c
29283+++ b/drivers/acpi/sbs.c
29284@@ -647,6 +647,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
29285
29286 static ssize_t
29287 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29288+ size_t count, loff_t * ppos) __size_overflow(3);
29289+static ssize_t
29290+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29291 size_t count, loff_t * ppos)
29292 {
29293 struct seq_file *seq = file->private_data;
29294diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
29295index d933980..5761f13 100644
29296--- a/drivers/acpi/sbshc.c
29297+++ b/drivers/acpi/sbshc.c
29298@@ -17,7 +17,7 @@
29299
29300 #define PREFIX "ACPI: "
29301
29302-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
29303+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
29304 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
29305
29306 struct acpi_smb_hc {
29307diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
29308index 0458094..6978e7b 100644
29309--- a/drivers/acpi/sleep.c
29310+++ b/drivers/acpi/sleep.c
29311@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
29312 }
29313 }
29314
29315-static struct platform_suspend_ops acpi_suspend_ops = {
29316+static const struct platform_suspend_ops acpi_suspend_ops = {
29317 .valid = acpi_suspend_state_valid,
29318 .begin = acpi_suspend_begin,
29319 .prepare_late = acpi_pm_prepare,
29320@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
29321 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29322 * been requested.
29323 */
29324-static struct platform_suspend_ops acpi_suspend_ops_old = {
29325+static const struct platform_suspend_ops acpi_suspend_ops_old = {
29326 .valid = acpi_suspend_state_valid,
29327 .begin = acpi_suspend_begin_old,
29328 .prepare_late = acpi_pm_disable_gpes,
29329@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
29330 acpi_enable_all_runtime_gpes();
29331 }
29332
29333-static struct platform_hibernation_ops acpi_hibernation_ops = {
29334+static const struct platform_hibernation_ops acpi_hibernation_ops = {
29335 .begin = acpi_hibernation_begin,
29336 .end = acpi_pm_end,
29337 .pre_snapshot = acpi_hibernation_pre_snapshot,
29338@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
29339 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29340 * been requested.
29341 */
29342-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
29343+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
29344 .begin = acpi_hibernation_begin_old,
29345 .end = acpi_pm_end,
29346 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
29347diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
29348index 05dff63..b662ab7 100644
29349--- a/drivers/acpi/video.c
29350+++ b/drivers/acpi/video.c
29351@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
29352 vd->brightness->levels[request_level]);
29353 }
29354
29355-static struct backlight_ops acpi_backlight_ops = {
29356+static const struct backlight_ops acpi_backlight_ops = {
29357 .get_brightness = acpi_video_get_brightness,
29358 .update_status = acpi_video_set_brightness,
29359 };
29360diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
29361index 6787aab..23ffb0e 100644
29362--- a/drivers/ata/ahci.c
29363+++ b/drivers/ata/ahci.c
29364@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
29365 .sdev_attrs = ahci_sdev_attrs,
29366 };
29367
29368-static struct ata_port_operations ahci_ops = {
29369+static const struct ata_port_operations ahci_ops = {
29370 .inherits = &sata_pmp_port_ops,
29371
29372 .qc_defer = sata_pmp_qc_defer_cmd_switch,
29373@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
29374 .port_stop = ahci_port_stop,
29375 };
29376
29377-static struct ata_port_operations ahci_vt8251_ops = {
29378+static const struct ata_port_operations ahci_vt8251_ops = {
29379 .inherits = &ahci_ops,
29380 .hardreset = ahci_vt8251_hardreset,
29381 };
29382
29383-static struct ata_port_operations ahci_p5wdh_ops = {
29384+static const struct ata_port_operations ahci_p5wdh_ops = {
29385 .inherits = &ahci_ops,
29386 .hardreset = ahci_p5wdh_hardreset,
29387 };
29388
29389-static struct ata_port_operations ahci_sb600_ops = {
29390+static const struct ata_port_operations ahci_sb600_ops = {
29391 .inherits = &ahci_ops,
29392 .softreset = ahci_sb600_softreset,
29393 .pmp_softreset = ahci_sb600_softreset,
29394diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
29395index 99e7196..4968c77 100644
29396--- a/drivers/ata/ata_generic.c
29397+++ b/drivers/ata/ata_generic.c
29398@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
29399 ATA_BMDMA_SHT(DRV_NAME),
29400 };
29401
29402-static struct ata_port_operations generic_port_ops = {
29403+static const struct ata_port_operations generic_port_ops = {
29404 .inherits = &ata_bmdma_port_ops,
29405 .cable_detect = ata_cable_unknown,
29406 .set_mode = generic_set_mode,
29407diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
29408index c33591d..000c121 100644
29409--- a/drivers/ata/ata_piix.c
29410+++ b/drivers/ata/ata_piix.c
29411@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
29412 ATA_BMDMA_SHT(DRV_NAME),
29413 };
29414
29415-static struct ata_port_operations piix_pata_ops = {
29416+static const struct ata_port_operations piix_pata_ops = {
29417 .inherits = &ata_bmdma32_port_ops,
29418 .cable_detect = ata_cable_40wire,
29419 .set_piomode = piix_set_piomode,
29420@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
29421 .prereset = piix_pata_prereset,
29422 };
29423
29424-static struct ata_port_operations piix_vmw_ops = {
29425+static const struct ata_port_operations piix_vmw_ops = {
29426 .inherits = &piix_pata_ops,
29427 .bmdma_status = piix_vmw_bmdma_status,
29428 };
29429
29430-static struct ata_port_operations ich_pata_ops = {
29431+static const struct ata_port_operations ich_pata_ops = {
29432 .inherits = &piix_pata_ops,
29433 .cable_detect = ich_pata_cable_detect,
29434 .set_dmamode = ich_set_dmamode,
29435 };
29436
29437-static struct ata_port_operations piix_sata_ops = {
29438+static const struct ata_port_operations piix_sata_ops = {
29439 .inherits = &ata_bmdma_port_ops,
29440 };
29441
29442-static struct ata_port_operations piix_sidpr_sata_ops = {
29443+static const struct ata_port_operations piix_sidpr_sata_ops = {
29444 .inherits = &piix_sata_ops,
29445 .hardreset = sata_std_hardreset,
29446 .scr_read = piix_sidpr_scr_read,
29447diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
29448index b0882cd..c295d65 100644
29449--- a/drivers/ata/libata-acpi.c
29450+++ b/drivers/ata/libata-acpi.c
29451@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
29452 ata_acpi_uevent(dev->link->ap, dev, event);
29453 }
29454
29455-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29456+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29457 .handler = ata_acpi_dev_notify_dock,
29458 .uevent = ata_acpi_dev_uevent,
29459 };
29460
29461-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29462+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29463 .handler = ata_acpi_ap_notify_dock,
29464 .uevent = ata_acpi_ap_uevent,
29465 };
29466diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29467index d4f7f99..94f603e 100644
29468--- a/drivers/ata/libata-core.c
29469+++ b/drivers/ata/libata-core.c
29470@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29471 struct ata_port *ap;
29472 unsigned int tag;
29473
29474- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29475+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29476 ap = qc->ap;
29477
29478 qc->flags = 0;
29479@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29480 struct ata_port *ap;
29481 struct ata_link *link;
29482
29483- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29484+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29485 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29486 ap = qc->ap;
29487 link = qc->dev->link;
29488@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
29489 * LOCKING:
29490 * None.
29491 */
29492-static void ata_finalize_port_ops(struct ata_port_operations *ops)
29493+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
29494 {
29495 static DEFINE_SPINLOCK(lock);
29496 const struct ata_port_operations *cur;
29497@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29498 return;
29499
29500 spin_lock(&lock);
29501+ pax_open_kernel();
29502
29503 for (cur = ops->inherits; cur; cur = cur->inherits) {
29504 void **inherit = (void **)cur;
29505@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29506 if (IS_ERR(*pp))
29507 *pp = NULL;
29508
29509- ops->inherits = NULL;
29510+ *(struct ata_port_operations **)&ops->inherits = NULL;
29511
29512+ pax_close_kernel();
29513 spin_unlock(&lock);
29514 }
29515
29516@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
29517 */
29518 /* KILLME - the only user left is ipr */
29519 void ata_host_init(struct ata_host *host, struct device *dev,
29520- unsigned long flags, struct ata_port_operations *ops)
29521+ unsigned long flags, const struct ata_port_operations *ops)
29522 {
29523 spin_lock_init(&host->lock);
29524 host->dev = dev;
29525@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
29526 /* truly dummy */
29527 }
29528
29529-struct ata_port_operations ata_dummy_port_ops = {
29530+const struct ata_port_operations ata_dummy_port_ops = {
29531 .qc_prep = ata_noop_qc_prep,
29532 .qc_issue = ata_dummy_qc_issue,
29533 .error_handler = ata_dummy_error_handler,
29534diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
29535index e5bdb9b..45a8e72 100644
29536--- a/drivers/ata/libata-eh.c
29537+++ b/drivers/ata/libata-eh.c
29538@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
29539 {
29540 struct ata_link *link;
29541
29542+ pax_track_stack();
29543+
29544 ata_for_each_link(link, ap, HOST_FIRST)
29545 ata_eh_link_report(link);
29546 }
29547@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
29548 */
29549 void ata_std_error_handler(struct ata_port *ap)
29550 {
29551- struct ata_port_operations *ops = ap->ops;
29552+ const struct ata_port_operations *ops = ap->ops;
29553 ata_reset_fn_t hardreset = ops->hardreset;
29554
29555 /* ignore built-in hardreset if SCR access is not available */
29556diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
29557index 51f0ffb..19ce3e3 100644
29558--- a/drivers/ata/libata-pmp.c
29559+++ b/drivers/ata/libata-pmp.c
29560@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
29561 */
29562 static int sata_pmp_eh_recover(struct ata_port *ap)
29563 {
29564- struct ata_port_operations *ops = ap->ops;
29565+ const struct ata_port_operations *ops = ap->ops;
29566 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
29567 struct ata_link *pmp_link = &ap->link;
29568 struct ata_device *pmp_dev = pmp_link->device;
29569diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
29570index d8f35fe..288180a 100644
29571--- a/drivers/ata/pata_acpi.c
29572+++ b/drivers/ata/pata_acpi.c
29573@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
29574 ATA_BMDMA_SHT(DRV_NAME),
29575 };
29576
29577-static struct ata_port_operations pacpi_ops = {
29578+static const struct ata_port_operations pacpi_ops = {
29579 .inherits = &ata_bmdma_port_ops,
29580 .qc_issue = pacpi_qc_issue,
29581 .cable_detect = pacpi_cable_detect,
29582diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
29583index 9434114..1f2f364 100644
29584--- a/drivers/ata/pata_ali.c
29585+++ b/drivers/ata/pata_ali.c
29586@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
29587 * Port operations for PIO only ALi
29588 */
29589
29590-static struct ata_port_operations ali_early_port_ops = {
29591+static const struct ata_port_operations ali_early_port_ops = {
29592 .inherits = &ata_sff_port_ops,
29593 .cable_detect = ata_cable_40wire,
29594 .set_piomode = ali_set_piomode,
29595@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
29596 * Port operations for DMA capable ALi without cable
29597 * detect
29598 */
29599-static struct ata_port_operations ali_20_port_ops = {
29600+static const struct ata_port_operations ali_20_port_ops = {
29601 .inherits = &ali_dma_base_ops,
29602 .cable_detect = ata_cable_40wire,
29603 .mode_filter = ali_20_filter,
29604@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
29605 /*
29606 * Port operations for DMA capable ALi with cable detect
29607 */
29608-static struct ata_port_operations ali_c2_port_ops = {
29609+static const struct ata_port_operations ali_c2_port_ops = {
29610 .inherits = &ali_dma_base_ops,
29611 .check_atapi_dma = ali_check_atapi_dma,
29612 .cable_detect = ali_c2_cable_detect,
29613@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
29614 /*
29615 * Port operations for DMA capable ALi with cable detect
29616 */
29617-static struct ata_port_operations ali_c4_port_ops = {
29618+static const struct ata_port_operations ali_c4_port_ops = {
29619 .inherits = &ali_dma_base_ops,
29620 .check_atapi_dma = ali_check_atapi_dma,
29621 .cable_detect = ali_c2_cable_detect,
29622@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
29623 /*
29624 * Port operations for DMA capable ALi with cable detect and LBA48
29625 */
29626-static struct ata_port_operations ali_c5_port_ops = {
29627+static const struct ata_port_operations ali_c5_port_ops = {
29628 .inherits = &ali_dma_base_ops,
29629 .check_atapi_dma = ali_check_atapi_dma,
29630 .dev_config = ali_warn_atapi_dma,
29631diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
29632index 567f3f7..c8ee0da 100644
29633--- a/drivers/ata/pata_amd.c
29634+++ b/drivers/ata/pata_amd.c
29635@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
29636 .prereset = amd_pre_reset,
29637 };
29638
29639-static struct ata_port_operations amd33_port_ops = {
29640+static const struct ata_port_operations amd33_port_ops = {
29641 .inherits = &amd_base_port_ops,
29642 .cable_detect = ata_cable_40wire,
29643 .set_piomode = amd33_set_piomode,
29644 .set_dmamode = amd33_set_dmamode,
29645 };
29646
29647-static struct ata_port_operations amd66_port_ops = {
29648+static const struct ata_port_operations amd66_port_ops = {
29649 .inherits = &amd_base_port_ops,
29650 .cable_detect = ata_cable_unknown,
29651 .set_piomode = amd66_set_piomode,
29652 .set_dmamode = amd66_set_dmamode,
29653 };
29654
29655-static struct ata_port_operations amd100_port_ops = {
29656+static const struct ata_port_operations amd100_port_ops = {
29657 .inherits = &amd_base_port_ops,
29658 .cable_detect = ata_cable_unknown,
29659 .set_piomode = amd100_set_piomode,
29660 .set_dmamode = amd100_set_dmamode,
29661 };
29662
29663-static struct ata_port_operations amd133_port_ops = {
29664+static const struct ata_port_operations amd133_port_ops = {
29665 .inherits = &amd_base_port_ops,
29666 .cable_detect = amd_cable_detect,
29667 .set_piomode = amd133_set_piomode,
29668@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
29669 .host_stop = nv_host_stop,
29670 };
29671
29672-static struct ata_port_operations nv100_port_ops = {
29673+static const struct ata_port_operations nv100_port_ops = {
29674 .inherits = &nv_base_port_ops,
29675 .set_piomode = nv100_set_piomode,
29676 .set_dmamode = nv100_set_dmamode,
29677 };
29678
29679-static struct ata_port_operations nv133_port_ops = {
29680+static const struct ata_port_operations nv133_port_ops = {
29681 .inherits = &nv_base_port_ops,
29682 .set_piomode = nv133_set_piomode,
29683 .set_dmamode = nv133_set_dmamode,
29684diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
29685index d332cfd..4b7eaae 100644
29686--- a/drivers/ata/pata_artop.c
29687+++ b/drivers/ata/pata_artop.c
29688@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
29689 ATA_BMDMA_SHT(DRV_NAME),
29690 };
29691
29692-static struct ata_port_operations artop6210_ops = {
29693+static const struct ata_port_operations artop6210_ops = {
29694 .inherits = &ata_bmdma_port_ops,
29695 .cable_detect = ata_cable_40wire,
29696 .set_piomode = artop6210_set_piomode,
29697@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
29698 .qc_defer = artop6210_qc_defer,
29699 };
29700
29701-static struct ata_port_operations artop6260_ops = {
29702+static const struct ata_port_operations artop6260_ops = {
29703 .inherits = &ata_bmdma_port_ops,
29704 .cable_detect = artop6260_cable_detect,
29705 .set_piomode = artop6260_set_piomode,
29706diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
29707index 5c129f9..7bb7ccb 100644
29708--- a/drivers/ata/pata_at32.c
29709+++ b/drivers/ata/pata_at32.c
29710@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
29711 ATA_PIO_SHT(DRV_NAME),
29712 };
29713
29714-static struct ata_port_operations at32_port_ops = {
29715+static const struct ata_port_operations at32_port_ops = {
29716 .inherits = &ata_sff_port_ops,
29717 .cable_detect = ata_cable_40wire,
29718 .set_piomode = pata_at32_set_piomode,
29719diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
29720index 41c94b1..829006d 100644
29721--- a/drivers/ata/pata_at91.c
29722+++ b/drivers/ata/pata_at91.c
29723@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
29724 ATA_PIO_SHT(DRV_NAME),
29725 };
29726
29727-static struct ata_port_operations pata_at91_port_ops = {
29728+static const struct ata_port_operations pata_at91_port_ops = {
29729 .inherits = &ata_sff_port_ops,
29730
29731 .sff_data_xfer = pata_at91_data_xfer_noirq,
29732diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
29733index ae4454d..d391eb4 100644
29734--- a/drivers/ata/pata_atiixp.c
29735+++ b/drivers/ata/pata_atiixp.c
29736@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
29737 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29738 };
29739
29740-static struct ata_port_operations atiixp_port_ops = {
29741+static const struct ata_port_operations atiixp_port_ops = {
29742 .inherits = &ata_bmdma_port_ops,
29743
29744 .qc_prep = ata_sff_dumb_qc_prep,
29745diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
29746index 6fe7ded..2a425dc 100644
29747--- a/drivers/ata/pata_atp867x.c
29748+++ b/drivers/ata/pata_atp867x.c
29749@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
29750 ATA_BMDMA_SHT(DRV_NAME),
29751 };
29752
29753-static struct ata_port_operations atp867x_ops = {
29754+static const struct ata_port_operations atp867x_ops = {
29755 .inherits = &ata_bmdma_port_ops,
29756 .cable_detect = atp867x_cable_detect,
29757 .set_piomode = atp867x_set_piomode,
29758diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
29759index c4b47a3..b27a367 100644
29760--- a/drivers/ata/pata_bf54x.c
29761+++ b/drivers/ata/pata_bf54x.c
29762@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
29763 .dma_boundary = ATA_DMA_BOUNDARY,
29764 };
29765
29766-static struct ata_port_operations bfin_pata_ops = {
29767+static const struct ata_port_operations bfin_pata_ops = {
29768 .inherits = &ata_sff_port_ops,
29769
29770 .set_piomode = bfin_set_piomode,
29771diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
29772index 5acf9fa..84248be 100644
29773--- a/drivers/ata/pata_cmd640.c
29774+++ b/drivers/ata/pata_cmd640.c
29775@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
29776 ATA_BMDMA_SHT(DRV_NAME),
29777 };
29778
29779-static struct ata_port_operations cmd640_port_ops = {
29780+static const struct ata_port_operations cmd640_port_ops = {
29781 .inherits = &ata_bmdma_port_ops,
29782 /* In theory xfer_noirq is not needed once we kill the prefetcher */
29783 .sff_data_xfer = ata_sff_data_xfer_noirq,
29784diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
29785index ccd2694..c869c3d 100644
29786--- a/drivers/ata/pata_cmd64x.c
29787+++ b/drivers/ata/pata_cmd64x.c
29788@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
29789 .set_dmamode = cmd64x_set_dmamode,
29790 };
29791
29792-static struct ata_port_operations cmd64x_port_ops = {
29793+static const struct ata_port_operations cmd64x_port_ops = {
29794 .inherits = &cmd64x_base_ops,
29795 .cable_detect = ata_cable_40wire,
29796 };
29797
29798-static struct ata_port_operations cmd646r1_port_ops = {
29799+static const struct ata_port_operations cmd646r1_port_ops = {
29800 .inherits = &cmd64x_base_ops,
29801 .bmdma_stop = cmd646r1_bmdma_stop,
29802 .cable_detect = ata_cable_40wire,
29803 };
29804
29805-static struct ata_port_operations cmd648_port_ops = {
29806+static const struct ata_port_operations cmd648_port_ops = {
29807 .inherits = &cmd64x_base_ops,
29808 .bmdma_stop = cmd648_bmdma_stop,
29809 .cable_detect = cmd648_cable_detect,
29810diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
29811index 0df83cf..d7595b0 100644
29812--- a/drivers/ata/pata_cs5520.c
29813+++ b/drivers/ata/pata_cs5520.c
29814@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
29815 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29816 };
29817
29818-static struct ata_port_operations cs5520_port_ops = {
29819+static const struct ata_port_operations cs5520_port_ops = {
29820 .inherits = &ata_bmdma_port_ops,
29821 .qc_prep = ata_sff_dumb_qc_prep,
29822 .cable_detect = ata_cable_40wire,
29823diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
29824index c974b05..6d26b11 100644
29825--- a/drivers/ata/pata_cs5530.c
29826+++ b/drivers/ata/pata_cs5530.c
29827@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
29828 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29829 };
29830
29831-static struct ata_port_operations cs5530_port_ops = {
29832+static const struct ata_port_operations cs5530_port_ops = {
29833 .inherits = &ata_bmdma_port_ops,
29834
29835 .qc_prep = ata_sff_dumb_qc_prep,
29836diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
29837index 403f561..aacd26b 100644
29838--- a/drivers/ata/pata_cs5535.c
29839+++ b/drivers/ata/pata_cs5535.c
29840@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
29841 ATA_BMDMA_SHT(DRV_NAME),
29842 };
29843
29844-static struct ata_port_operations cs5535_port_ops = {
29845+static const struct ata_port_operations cs5535_port_ops = {
29846 .inherits = &ata_bmdma_port_ops,
29847 .cable_detect = cs5535_cable_detect,
29848 .set_piomode = cs5535_set_piomode,
29849diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
29850index 6da4cb4..de24a25 100644
29851--- a/drivers/ata/pata_cs5536.c
29852+++ b/drivers/ata/pata_cs5536.c
29853@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
29854 ATA_BMDMA_SHT(DRV_NAME),
29855 };
29856
29857-static struct ata_port_operations cs5536_port_ops = {
29858+static const struct ata_port_operations cs5536_port_ops = {
29859 .inherits = &ata_bmdma_port_ops,
29860 .cable_detect = cs5536_cable_detect,
29861 .set_piomode = cs5536_set_piomode,
29862diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
29863index 8fb040b..b16a9c9 100644
29864--- a/drivers/ata/pata_cypress.c
29865+++ b/drivers/ata/pata_cypress.c
29866@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
29867 ATA_BMDMA_SHT(DRV_NAME),
29868 };
29869
29870-static struct ata_port_operations cy82c693_port_ops = {
29871+static const struct ata_port_operations cy82c693_port_ops = {
29872 .inherits = &ata_bmdma_port_ops,
29873 .cable_detect = ata_cable_40wire,
29874 .set_piomode = cy82c693_set_piomode,
29875diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
29876index 2a6412f..555ee11 100644
29877--- a/drivers/ata/pata_efar.c
29878+++ b/drivers/ata/pata_efar.c
29879@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
29880 ATA_BMDMA_SHT(DRV_NAME),
29881 };
29882
29883-static struct ata_port_operations efar_ops = {
29884+static const struct ata_port_operations efar_ops = {
29885 .inherits = &ata_bmdma_port_ops,
29886 .cable_detect = efar_cable_detect,
29887 .set_piomode = efar_set_piomode,
29888diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
29889index b9d8836..0b92030 100644
29890--- a/drivers/ata/pata_hpt366.c
29891+++ b/drivers/ata/pata_hpt366.c
29892@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
29893 * Configuration for HPT366/68
29894 */
29895
29896-static struct ata_port_operations hpt366_port_ops = {
29897+static const struct ata_port_operations hpt366_port_ops = {
29898 .inherits = &ata_bmdma_port_ops,
29899 .cable_detect = hpt36x_cable_detect,
29900 .mode_filter = hpt366_filter,
29901diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
29902index 5af7f19..00c4980 100644
29903--- a/drivers/ata/pata_hpt37x.c
29904+++ b/drivers/ata/pata_hpt37x.c
29905@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
29906 * Configuration for HPT370
29907 */
29908
29909-static struct ata_port_operations hpt370_port_ops = {
29910+static const struct ata_port_operations hpt370_port_ops = {
29911 .inherits = &ata_bmdma_port_ops,
29912
29913 .bmdma_stop = hpt370_bmdma_stop,
29914@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
29915 * Configuration for HPT370A. Close to 370 but less filters
29916 */
29917
29918-static struct ata_port_operations hpt370a_port_ops = {
29919+static const struct ata_port_operations hpt370a_port_ops = {
29920 .inherits = &hpt370_port_ops,
29921 .mode_filter = hpt370a_filter,
29922 };
29923@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
29924 * and DMA mode setting functionality.
29925 */
29926
29927-static struct ata_port_operations hpt372_port_ops = {
29928+static const struct ata_port_operations hpt372_port_ops = {
29929 .inherits = &ata_bmdma_port_ops,
29930
29931 .bmdma_stop = hpt37x_bmdma_stop,
29932@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
29933 * but we have a different cable detection procedure for function 1.
29934 */
29935
29936-static struct ata_port_operations hpt374_fn1_port_ops = {
29937+static const struct ata_port_operations hpt374_fn1_port_ops = {
29938 .inherits = &hpt372_port_ops,
29939 .prereset = hpt374_fn1_pre_reset,
29940 };
29941diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
29942index 100f227..2e39382 100644
29943--- a/drivers/ata/pata_hpt3x2n.c
29944+++ b/drivers/ata/pata_hpt3x2n.c
29945@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
29946 * Configuration for HPT3x2n.
29947 */
29948
29949-static struct ata_port_operations hpt3x2n_port_ops = {
29950+static const struct ata_port_operations hpt3x2n_port_ops = {
29951 .inherits = &ata_bmdma_port_ops,
29952
29953 .bmdma_stop = hpt3x2n_bmdma_stop,
29954diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
29955index 7e31025..6fca8f4 100644
29956--- a/drivers/ata/pata_hpt3x3.c
29957+++ b/drivers/ata/pata_hpt3x3.c
29958@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
29959 ATA_BMDMA_SHT(DRV_NAME),
29960 };
29961
29962-static struct ata_port_operations hpt3x3_port_ops = {
29963+static const struct ata_port_operations hpt3x3_port_ops = {
29964 .inherits = &ata_bmdma_port_ops,
29965 .cable_detect = ata_cable_40wire,
29966 .set_piomode = hpt3x3_set_piomode,
29967diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
29968index b663b7f..9a26c2a 100644
29969--- a/drivers/ata/pata_icside.c
29970+++ b/drivers/ata/pata_icside.c
29971@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
29972 }
29973 }
29974
29975-static struct ata_port_operations pata_icside_port_ops = {
29976+static const struct ata_port_operations pata_icside_port_ops = {
29977 .inherits = &ata_sff_port_ops,
29978 /* no need to build any PRD tables for DMA */
29979 .qc_prep = ata_noop_qc_prep,
29980diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
29981index 4bceb88..457dfb6 100644
29982--- a/drivers/ata/pata_isapnp.c
29983+++ b/drivers/ata/pata_isapnp.c
29984@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
29985 ATA_PIO_SHT(DRV_NAME),
29986 };
29987
29988-static struct ata_port_operations isapnp_port_ops = {
29989+static const struct ata_port_operations isapnp_port_ops = {
29990 .inherits = &ata_sff_port_ops,
29991 .cable_detect = ata_cable_40wire,
29992 };
29993
29994-static struct ata_port_operations isapnp_noalt_port_ops = {
29995+static const struct ata_port_operations isapnp_noalt_port_ops = {
29996 .inherits = &ata_sff_port_ops,
29997 .cable_detect = ata_cable_40wire,
29998 /* No altstatus so we don't want to use the lost interrupt poll */
29999diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
30000index f156da8..24976e2 100644
30001--- a/drivers/ata/pata_it8213.c
30002+++ b/drivers/ata/pata_it8213.c
30003@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
30004 };
30005
30006
30007-static struct ata_port_operations it8213_ops = {
30008+static const struct ata_port_operations it8213_ops = {
30009 .inherits = &ata_bmdma_port_ops,
30010 .cable_detect = it8213_cable_detect,
30011 .set_piomode = it8213_set_piomode,
30012diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
30013index 188bc2f..ca9e785 100644
30014--- a/drivers/ata/pata_it821x.c
30015+++ b/drivers/ata/pata_it821x.c
30016@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
30017 ATA_BMDMA_SHT(DRV_NAME),
30018 };
30019
30020-static struct ata_port_operations it821x_smart_port_ops = {
30021+static const struct ata_port_operations it821x_smart_port_ops = {
30022 .inherits = &ata_bmdma_port_ops,
30023
30024 .check_atapi_dma= it821x_check_atapi_dma,
30025@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
30026 .port_start = it821x_port_start,
30027 };
30028
30029-static struct ata_port_operations it821x_passthru_port_ops = {
30030+static const struct ata_port_operations it821x_passthru_port_ops = {
30031 .inherits = &ata_bmdma_port_ops,
30032
30033 .check_atapi_dma= it821x_check_atapi_dma,
30034@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
30035 .port_start = it821x_port_start,
30036 };
30037
30038-static struct ata_port_operations it821x_rdc_port_ops = {
30039+static const struct ata_port_operations it821x_rdc_port_ops = {
30040 .inherits = &ata_bmdma_port_ops,
30041
30042 .check_atapi_dma= it821x_check_atapi_dma,
30043diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
30044index ba54b08..4b952b7 100644
30045--- a/drivers/ata/pata_ixp4xx_cf.c
30046+++ b/drivers/ata/pata_ixp4xx_cf.c
30047@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
30048 ATA_PIO_SHT(DRV_NAME),
30049 };
30050
30051-static struct ata_port_operations ixp4xx_port_ops = {
30052+static const struct ata_port_operations ixp4xx_port_ops = {
30053 .inherits = &ata_sff_port_ops,
30054 .sff_data_xfer = ixp4xx_mmio_data_xfer,
30055 .cable_detect = ata_cable_40wire,
30056diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
30057index 3a1474a..434b0ff 100644
30058--- a/drivers/ata/pata_jmicron.c
30059+++ b/drivers/ata/pata_jmicron.c
30060@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
30061 ATA_BMDMA_SHT(DRV_NAME),
30062 };
30063
30064-static struct ata_port_operations jmicron_ops = {
30065+static const struct ata_port_operations jmicron_ops = {
30066 .inherits = &ata_bmdma_port_ops,
30067 .prereset = jmicron_pre_reset,
30068 };
30069diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
30070index 6932e56..220e71d 100644
30071--- a/drivers/ata/pata_legacy.c
30072+++ b/drivers/ata/pata_legacy.c
30073@@ -106,7 +106,7 @@ struct legacy_probe {
30074
30075 struct legacy_controller {
30076 const char *name;
30077- struct ata_port_operations *ops;
30078+ const struct ata_port_operations *ops;
30079 unsigned int pio_mask;
30080 unsigned int flags;
30081 unsigned int pflags;
30082@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
30083 * pio_mask as well.
30084 */
30085
30086-static struct ata_port_operations simple_port_ops = {
30087+static const struct ata_port_operations simple_port_ops = {
30088 .inherits = &legacy_base_port_ops,
30089 .sff_data_xfer = ata_sff_data_xfer_noirq,
30090 };
30091
30092-static struct ata_port_operations legacy_port_ops = {
30093+static const struct ata_port_operations legacy_port_ops = {
30094 .inherits = &legacy_base_port_ops,
30095 .sff_data_xfer = ata_sff_data_xfer_noirq,
30096 .set_mode = legacy_set_mode,
30097@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
30098 return buflen;
30099 }
30100
30101-static struct ata_port_operations pdc20230_port_ops = {
30102+static const struct ata_port_operations pdc20230_port_ops = {
30103 .inherits = &legacy_base_port_ops,
30104 .set_piomode = pdc20230_set_piomode,
30105 .sff_data_xfer = pdc_data_xfer_vlb,
30106@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
30107 ioread8(ap->ioaddr.status_addr);
30108 }
30109
30110-static struct ata_port_operations ht6560a_port_ops = {
30111+static const struct ata_port_operations ht6560a_port_ops = {
30112 .inherits = &legacy_base_port_ops,
30113 .set_piomode = ht6560a_set_piomode,
30114 };
30115@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
30116 ioread8(ap->ioaddr.status_addr);
30117 }
30118
30119-static struct ata_port_operations ht6560b_port_ops = {
30120+static const struct ata_port_operations ht6560b_port_ops = {
30121 .inherits = &legacy_base_port_ops,
30122 .set_piomode = ht6560b_set_piomode,
30123 };
30124@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
30125 }
30126
30127
30128-static struct ata_port_operations opti82c611a_port_ops = {
30129+static const struct ata_port_operations opti82c611a_port_ops = {
30130 .inherits = &legacy_base_port_ops,
30131 .set_piomode = opti82c611a_set_piomode,
30132 };
30133@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
30134 return ata_sff_qc_issue(qc);
30135 }
30136
30137-static struct ata_port_operations opti82c46x_port_ops = {
30138+static const struct ata_port_operations opti82c46x_port_ops = {
30139 .inherits = &legacy_base_port_ops,
30140 .set_piomode = opti82c46x_set_piomode,
30141 .qc_issue = opti82c46x_qc_issue,
30142@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
30143 return 0;
30144 }
30145
30146-static struct ata_port_operations qdi6500_port_ops = {
30147+static const struct ata_port_operations qdi6500_port_ops = {
30148 .inherits = &legacy_base_port_ops,
30149 .set_piomode = qdi6500_set_piomode,
30150 .qc_issue = qdi_qc_issue,
30151 .sff_data_xfer = vlb32_data_xfer,
30152 };
30153
30154-static struct ata_port_operations qdi6580_port_ops = {
30155+static const struct ata_port_operations qdi6580_port_ops = {
30156 .inherits = &legacy_base_port_ops,
30157 .set_piomode = qdi6580_set_piomode,
30158 .sff_data_xfer = vlb32_data_xfer,
30159 };
30160
30161-static struct ata_port_operations qdi6580dp_port_ops = {
30162+static const struct ata_port_operations qdi6580dp_port_ops = {
30163 .inherits = &legacy_base_port_ops,
30164 .set_piomode = qdi6580dp_set_piomode,
30165 .sff_data_xfer = vlb32_data_xfer,
30166@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
30167 return 0;
30168 }
30169
30170-static struct ata_port_operations winbond_port_ops = {
30171+static const struct ata_port_operations winbond_port_ops = {
30172 .inherits = &legacy_base_port_ops,
30173 .set_piomode = winbond_set_piomode,
30174 .sff_data_xfer = vlb32_data_xfer,
30175@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
30176 int pio_modes = controller->pio_mask;
30177 unsigned long io = probe->port;
30178 u32 mask = (1 << probe->slot);
30179- struct ata_port_operations *ops = controller->ops;
30180+ const struct ata_port_operations *ops = controller->ops;
30181 struct legacy_data *ld = &legacy_data[probe->slot];
30182 struct ata_host *host = NULL;
30183 struct ata_port *ap;
30184diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
30185index 2096fb7..4d090fc 100644
30186--- a/drivers/ata/pata_marvell.c
30187+++ b/drivers/ata/pata_marvell.c
30188@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
30189 ATA_BMDMA_SHT(DRV_NAME),
30190 };
30191
30192-static struct ata_port_operations marvell_ops = {
30193+static const struct ata_port_operations marvell_ops = {
30194 .inherits = &ata_bmdma_port_ops,
30195 .cable_detect = marvell_cable_detect,
30196 .prereset = marvell_pre_reset,
30197diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
30198index 99d41be..7d56aa8 100644
30199--- a/drivers/ata/pata_mpc52xx.c
30200+++ b/drivers/ata/pata_mpc52xx.c
30201@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
30202 ATA_PIO_SHT(DRV_NAME),
30203 };
30204
30205-static struct ata_port_operations mpc52xx_ata_port_ops = {
30206+static const struct ata_port_operations mpc52xx_ata_port_ops = {
30207 .inherits = &ata_bmdma_port_ops,
30208 .sff_dev_select = mpc52xx_ata_dev_select,
30209 .set_piomode = mpc52xx_ata_set_piomode,
30210diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
30211index b21f002..0a27e7f 100644
30212--- a/drivers/ata/pata_mpiix.c
30213+++ b/drivers/ata/pata_mpiix.c
30214@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
30215 ATA_PIO_SHT(DRV_NAME),
30216 };
30217
30218-static struct ata_port_operations mpiix_port_ops = {
30219+static const struct ata_port_operations mpiix_port_ops = {
30220 .inherits = &ata_sff_port_ops,
30221 .qc_issue = mpiix_qc_issue,
30222 .cable_detect = ata_cable_40wire,
30223diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
30224index f0d52f7..89c3be3 100644
30225--- a/drivers/ata/pata_netcell.c
30226+++ b/drivers/ata/pata_netcell.c
30227@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
30228 ATA_BMDMA_SHT(DRV_NAME),
30229 };
30230
30231-static struct ata_port_operations netcell_ops = {
30232+static const struct ata_port_operations netcell_ops = {
30233 .inherits = &ata_bmdma_port_ops,
30234 .cable_detect = ata_cable_80wire,
30235 .read_id = netcell_read_id,
30236diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
30237index dd53a66..a3f4317 100644
30238--- a/drivers/ata/pata_ninja32.c
30239+++ b/drivers/ata/pata_ninja32.c
30240@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
30241 ATA_BMDMA_SHT(DRV_NAME),
30242 };
30243
30244-static struct ata_port_operations ninja32_port_ops = {
30245+static const struct ata_port_operations ninja32_port_ops = {
30246 .inherits = &ata_bmdma_port_ops,
30247 .sff_dev_select = ninja32_dev_select,
30248 .cable_detect = ata_cable_40wire,
30249diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
30250index ca53fac..9aa93ef 100644
30251--- a/drivers/ata/pata_ns87410.c
30252+++ b/drivers/ata/pata_ns87410.c
30253@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
30254 ATA_PIO_SHT(DRV_NAME),
30255 };
30256
30257-static struct ata_port_operations ns87410_port_ops = {
30258+static const struct ata_port_operations ns87410_port_ops = {
30259 .inherits = &ata_sff_port_ops,
30260 .qc_issue = ns87410_qc_issue,
30261 .cable_detect = ata_cable_40wire,
30262diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
30263index 773b159..55f454e 100644
30264--- a/drivers/ata/pata_ns87415.c
30265+++ b/drivers/ata/pata_ns87415.c
30266@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
30267 }
30268 #endif /* 87560 SuperIO Support */
30269
30270-static struct ata_port_operations ns87415_pata_ops = {
30271+static const struct ata_port_operations ns87415_pata_ops = {
30272 .inherits = &ata_bmdma_port_ops,
30273
30274 .check_atapi_dma = ns87415_check_atapi_dma,
30275@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
30276 };
30277
30278 #if defined(CONFIG_SUPERIO)
30279-static struct ata_port_operations ns87560_pata_ops = {
30280+static const struct ata_port_operations ns87560_pata_ops = {
30281 .inherits = &ns87415_pata_ops,
30282 .sff_tf_read = ns87560_tf_read,
30283 .sff_check_status = ns87560_check_status,
30284diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
30285index d6f6956..639295b 100644
30286--- a/drivers/ata/pata_octeon_cf.c
30287+++ b/drivers/ata/pata_octeon_cf.c
30288@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
30289 return 0;
30290 }
30291
30292+/* cannot be const */
30293 static struct ata_port_operations octeon_cf_ops = {
30294 .inherits = &ata_sff_port_ops,
30295 .check_atapi_dma = octeon_cf_check_atapi_dma,
30296diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
30297index 84ac503..adee1cd 100644
30298--- a/drivers/ata/pata_oldpiix.c
30299+++ b/drivers/ata/pata_oldpiix.c
30300@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
30301 ATA_BMDMA_SHT(DRV_NAME),
30302 };
30303
30304-static struct ata_port_operations oldpiix_pata_ops = {
30305+static const struct ata_port_operations oldpiix_pata_ops = {
30306 .inherits = &ata_bmdma_port_ops,
30307 .qc_issue = oldpiix_qc_issue,
30308 .cable_detect = ata_cable_40wire,
30309diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
30310index 99eddda..3a4c0aa 100644
30311--- a/drivers/ata/pata_opti.c
30312+++ b/drivers/ata/pata_opti.c
30313@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
30314 ATA_PIO_SHT(DRV_NAME),
30315 };
30316
30317-static struct ata_port_operations opti_port_ops = {
30318+static const struct ata_port_operations opti_port_ops = {
30319 .inherits = &ata_sff_port_ops,
30320 .cable_detect = ata_cable_40wire,
30321 .set_piomode = opti_set_piomode,
30322diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
30323index 86885a4..8e9968d 100644
30324--- a/drivers/ata/pata_optidma.c
30325+++ b/drivers/ata/pata_optidma.c
30326@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
30327 ATA_BMDMA_SHT(DRV_NAME),
30328 };
30329
30330-static struct ata_port_operations optidma_port_ops = {
30331+static const struct ata_port_operations optidma_port_ops = {
30332 .inherits = &ata_bmdma_port_ops,
30333 .cable_detect = ata_cable_40wire,
30334 .set_piomode = optidma_set_pio_mode,
30335@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
30336 .prereset = optidma_pre_reset,
30337 };
30338
30339-static struct ata_port_operations optiplus_port_ops = {
30340+static const struct ata_port_operations optiplus_port_ops = {
30341 .inherits = &optidma_port_ops,
30342 .set_piomode = optiplus_set_pio_mode,
30343 .set_dmamode = optiplus_set_dma_mode,
30344diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
30345index 11fb4cc..1a14022 100644
30346--- a/drivers/ata/pata_palmld.c
30347+++ b/drivers/ata/pata_palmld.c
30348@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
30349 ATA_PIO_SHT(DRV_NAME),
30350 };
30351
30352-static struct ata_port_operations palmld_port_ops = {
30353+static const struct ata_port_operations palmld_port_ops = {
30354 .inherits = &ata_sff_port_ops,
30355 .sff_data_xfer = ata_sff_data_xfer_noirq,
30356 .cable_detect = ata_cable_40wire,
30357diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
30358index dc99e26..7f4b1e4 100644
30359--- a/drivers/ata/pata_pcmcia.c
30360+++ b/drivers/ata/pata_pcmcia.c
30361@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
30362 ATA_PIO_SHT(DRV_NAME),
30363 };
30364
30365-static struct ata_port_operations pcmcia_port_ops = {
30366+static const struct ata_port_operations pcmcia_port_ops = {
30367 .inherits = &ata_sff_port_ops,
30368 .sff_data_xfer = ata_sff_data_xfer_noirq,
30369 .cable_detect = ata_cable_40wire,
30370 .set_mode = pcmcia_set_mode,
30371 };
30372
30373-static struct ata_port_operations pcmcia_8bit_port_ops = {
30374+static const struct ata_port_operations pcmcia_8bit_port_ops = {
30375 .inherits = &ata_sff_port_ops,
30376 .sff_data_xfer = ata_data_xfer_8bit,
30377 .cable_detect = ata_cable_40wire,
30378@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
30379 unsigned long io_base, ctl_base;
30380 void __iomem *io_addr, *ctl_addr;
30381 int n_ports = 1;
30382- struct ata_port_operations *ops = &pcmcia_port_ops;
30383+ const struct ata_port_operations *ops = &pcmcia_port_ops;
30384
30385 info = kzalloc(sizeof(*info), GFP_KERNEL);
30386 if (info == NULL)
30387diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
30388index ca5cad0..3a1f125 100644
30389--- a/drivers/ata/pata_pdc2027x.c
30390+++ b/drivers/ata/pata_pdc2027x.c
30391@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
30392 ATA_BMDMA_SHT(DRV_NAME),
30393 };
30394
30395-static struct ata_port_operations pdc2027x_pata100_ops = {
30396+static const struct ata_port_operations pdc2027x_pata100_ops = {
30397 .inherits = &ata_bmdma_port_ops,
30398 .check_atapi_dma = pdc2027x_check_atapi_dma,
30399 .cable_detect = pdc2027x_cable_detect,
30400 .prereset = pdc2027x_prereset,
30401 };
30402
30403-static struct ata_port_operations pdc2027x_pata133_ops = {
30404+static const struct ata_port_operations pdc2027x_pata133_ops = {
30405 .inherits = &pdc2027x_pata100_ops,
30406 .mode_filter = pdc2027x_mode_filter,
30407 .set_piomode = pdc2027x_set_piomode,
30408diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
30409index 2911120..4bf62aa 100644
30410--- a/drivers/ata/pata_pdc202xx_old.c
30411+++ b/drivers/ata/pata_pdc202xx_old.c
30412@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
30413 ATA_BMDMA_SHT(DRV_NAME),
30414 };
30415
30416-static struct ata_port_operations pdc2024x_port_ops = {
30417+static const struct ata_port_operations pdc2024x_port_ops = {
30418 .inherits = &ata_bmdma_port_ops,
30419
30420 .cable_detect = ata_cable_40wire,
30421@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
30422 .sff_exec_command = pdc202xx_exec_command,
30423 };
30424
30425-static struct ata_port_operations pdc2026x_port_ops = {
30426+static const struct ata_port_operations pdc2026x_port_ops = {
30427 .inherits = &pdc2024x_port_ops,
30428
30429 .check_atapi_dma = pdc2026x_check_atapi_dma,
30430diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
30431index 3f6ebc6..a18c358 100644
30432--- a/drivers/ata/pata_platform.c
30433+++ b/drivers/ata/pata_platform.c
30434@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
30435 ATA_PIO_SHT(DRV_NAME),
30436 };
30437
30438-static struct ata_port_operations pata_platform_port_ops = {
30439+static const struct ata_port_operations pata_platform_port_ops = {
30440 .inherits = &ata_sff_port_ops,
30441 .sff_data_xfer = ata_sff_data_xfer_noirq,
30442 .cable_detect = ata_cable_unknown,
30443diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
30444index 45879dc..165a9f9 100644
30445--- a/drivers/ata/pata_qdi.c
30446+++ b/drivers/ata/pata_qdi.c
30447@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
30448 ATA_PIO_SHT(DRV_NAME),
30449 };
30450
30451-static struct ata_port_operations qdi6500_port_ops = {
30452+static const struct ata_port_operations qdi6500_port_ops = {
30453 .inherits = &ata_sff_port_ops,
30454 .qc_issue = qdi_qc_issue,
30455 .sff_data_xfer = qdi_data_xfer,
30456@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
30457 .set_piomode = qdi6500_set_piomode,
30458 };
30459
30460-static struct ata_port_operations qdi6580_port_ops = {
30461+static const struct ata_port_operations qdi6580_port_ops = {
30462 .inherits = &qdi6500_port_ops,
30463 .set_piomode = qdi6580_set_piomode,
30464 };
30465diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
30466index 4401b33..716c5cc 100644
30467--- a/drivers/ata/pata_radisys.c
30468+++ b/drivers/ata/pata_radisys.c
30469@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
30470 ATA_BMDMA_SHT(DRV_NAME),
30471 };
30472
30473-static struct ata_port_operations radisys_pata_ops = {
30474+static const struct ata_port_operations radisys_pata_ops = {
30475 .inherits = &ata_bmdma_port_ops,
30476 .qc_issue = radisys_qc_issue,
30477 .cable_detect = ata_cable_unknown,
30478diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
30479index 45f1e10..fab6bca 100644
30480--- a/drivers/ata/pata_rb532_cf.c
30481+++ b/drivers/ata/pata_rb532_cf.c
30482@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
30483 return IRQ_HANDLED;
30484 }
30485
30486-static struct ata_port_operations rb532_pata_port_ops = {
30487+static const struct ata_port_operations rb532_pata_port_ops = {
30488 .inherits = &ata_sff_port_ops,
30489 .sff_data_xfer = ata_sff_data_xfer32,
30490 };
30491diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
30492index c843a1e..b5853c3 100644
30493--- a/drivers/ata/pata_rdc.c
30494+++ b/drivers/ata/pata_rdc.c
30495@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
30496 pci_write_config_byte(dev, 0x48, udma_enable);
30497 }
30498
30499-static struct ata_port_operations rdc_pata_ops = {
30500+static const struct ata_port_operations rdc_pata_ops = {
30501 .inherits = &ata_bmdma32_port_ops,
30502 .cable_detect = rdc_pata_cable_detect,
30503 .set_piomode = rdc_set_piomode,
30504diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
30505index a5e4dfe..080c8c9 100644
30506--- a/drivers/ata/pata_rz1000.c
30507+++ b/drivers/ata/pata_rz1000.c
30508@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
30509 ATA_PIO_SHT(DRV_NAME),
30510 };
30511
30512-static struct ata_port_operations rz1000_port_ops = {
30513+static const struct ata_port_operations rz1000_port_ops = {
30514 .inherits = &ata_sff_port_ops,
30515 .cable_detect = ata_cable_40wire,
30516 .set_mode = rz1000_set_mode,
30517diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
30518index 3bbed83..e309daf 100644
30519--- a/drivers/ata/pata_sc1200.c
30520+++ b/drivers/ata/pata_sc1200.c
30521@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
30522 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
30523 };
30524
30525-static struct ata_port_operations sc1200_port_ops = {
30526+static const struct ata_port_operations sc1200_port_ops = {
30527 .inherits = &ata_bmdma_port_ops,
30528 .qc_prep = ata_sff_dumb_qc_prep,
30529 .qc_issue = sc1200_qc_issue,
30530diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
30531index 4257d6b..4c1d9d5 100644
30532--- a/drivers/ata/pata_scc.c
30533+++ b/drivers/ata/pata_scc.c
30534@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
30535 ATA_BMDMA_SHT(DRV_NAME),
30536 };
30537
30538-static struct ata_port_operations scc_pata_ops = {
30539+static const struct ata_port_operations scc_pata_ops = {
30540 .inherits = &ata_bmdma_port_ops,
30541
30542 .set_piomode = scc_set_piomode,
30543diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
30544index 99cceb4..e2e0a87 100644
30545--- a/drivers/ata/pata_sch.c
30546+++ b/drivers/ata/pata_sch.c
30547@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
30548 ATA_BMDMA_SHT(DRV_NAME),
30549 };
30550
30551-static struct ata_port_operations sch_pata_ops = {
30552+static const struct ata_port_operations sch_pata_ops = {
30553 .inherits = &ata_bmdma_port_ops,
30554 .cable_detect = ata_cable_unknown,
30555 .set_piomode = sch_set_piomode,
30556diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
30557index beaed12..39969f1 100644
30558--- a/drivers/ata/pata_serverworks.c
30559+++ b/drivers/ata/pata_serverworks.c
30560@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
30561 ATA_BMDMA_SHT(DRV_NAME),
30562 };
30563
30564-static struct ata_port_operations serverworks_osb4_port_ops = {
30565+static const struct ata_port_operations serverworks_osb4_port_ops = {
30566 .inherits = &ata_bmdma_port_ops,
30567 .cable_detect = serverworks_cable_detect,
30568 .mode_filter = serverworks_osb4_filter,
30569@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
30570 .set_dmamode = serverworks_set_dmamode,
30571 };
30572
30573-static struct ata_port_operations serverworks_csb_port_ops = {
30574+static const struct ata_port_operations serverworks_csb_port_ops = {
30575 .inherits = &serverworks_osb4_port_ops,
30576 .mode_filter = serverworks_csb_filter,
30577 };
30578diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
30579index a2ace48..0463b44 100644
30580--- a/drivers/ata/pata_sil680.c
30581+++ b/drivers/ata/pata_sil680.c
30582@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
30583 ATA_BMDMA_SHT(DRV_NAME),
30584 };
30585
30586-static struct ata_port_operations sil680_port_ops = {
30587+static const struct ata_port_operations sil680_port_ops = {
30588 .inherits = &ata_bmdma32_port_ops,
30589 .cable_detect = sil680_cable_detect,
30590 .set_piomode = sil680_set_piomode,
30591diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
30592index 488e77b..b3724d5 100644
30593--- a/drivers/ata/pata_sis.c
30594+++ b/drivers/ata/pata_sis.c
30595@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
30596 ATA_BMDMA_SHT(DRV_NAME),
30597 };
30598
30599-static struct ata_port_operations sis_133_for_sata_ops = {
30600+static const struct ata_port_operations sis_133_for_sata_ops = {
30601 .inherits = &ata_bmdma_port_ops,
30602 .set_piomode = sis_133_set_piomode,
30603 .set_dmamode = sis_133_set_dmamode,
30604 .cable_detect = sis_133_cable_detect,
30605 };
30606
30607-static struct ata_port_operations sis_base_ops = {
30608+static const struct ata_port_operations sis_base_ops = {
30609 .inherits = &ata_bmdma_port_ops,
30610 .prereset = sis_pre_reset,
30611 };
30612
30613-static struct ata_port_operations sis_133_ops = {
30614+static const struct ata_port_operations sis_133_ops = {
30615 .inherits = &sis_base_ops,
30616 .set_piomode = sis_133_set_piomode,
30617 .set_dmamode = sis_133_set_dmamode,
30618 .cable_detect = sis_133_cable_detect,
30619 };
30620
30621-static struct ata_port_operations sis_133_early_ops = {
30622+static const struct ata_port_operations sis_133_early_ops = {
30623 .inherits = &sis_base_ops,
30624 .set_piomode = sis_100_set_piomode,
30625 .set_dmamode = sis_133_early_set_dmamode,
30626 .cable_detect = sis_66_cable_detect,
30627 };
30628
30629-static struct ata_port_operations sis_100_ops = {
30630+static const struct ata_port_operations sis_100_ops = {
30631 .inherits = &sis_base_ops,
30632 .set_piomode = sis_100_set_piomode,
30633 .set_dmamode = sis_100_set_dmamode,
30634 .cable_detect = sis_66_cable_detect,
30635 };
30636
30637-static struct ata_port_operations sis_66_ops = {
30638+static const struct ata_port_operations sis_66_ops = {
30639 .inherits = &sis_base_ops,
30640 .set_piomode = sis_old_set_piomode,
30641 .set_dmamode = sis_66_set_dmamode,
30642 .cable_detect = sis_66_cable_detect,
30643 };
30644
30645-static struct ata_port_operations sis_old_ops = {
30646+static const struct ata_port_operations sis_old_ops = {
30647 .inherits = &sis_base_ops,
30648 .set_piomode = sis_old_set_piomode,
30649 .set_dmamode = sis_old_set_dmamode,
30650diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
30651index 29f733c..43e9ca0 100644
30652--- a/drivers/ata/pata_sl82c105.c
30653+++ b/drivers/ata/pata_sl82c105.c
30654@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
30655 ATA_BMDMA_SHT(DRV_NAME),
30656 };
30657
30658-static struct ata_port_operations sl82c105_port_ops = {
30659+static const struct ata_port_operations sl82c105_port_ops = {
30660 .inherits = &ata_bmdma_port_ops,
30661 .qc_defer = sl82c105_qc_defer,
30662 .bmdma_start = sl82c105_bmdma_start,
30663diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
30664index f1f13ff..df39e99 100644
30665--- a/drivers/ata/pata_triflex.c
30666+++ b/drivers/ata/pata_triflex.c
30667@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
30668 ATA_BMDMA_SHT(DRV_NAME),
30669 };
30670
30671-static struct ata_port_operations triflex_port_ops = {
30672+static const struct ata_port_operations triflex_port_ops = {
30673 .inherits = &ata_bmdma_port_ops,
30674 .bmdma_start = triflex_bmdma_start,
30675 .bmdma_stop = triflex_bmdma_stop,
30676diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
30677index 1d73b8d..98a4b29 100644
30678--- a/drivers/ata/pata_via.c
30679+++ b/drivers/ata/pata_via.c
30680@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
30681 ATA_BMDMA_SHT(DRV_NAME),
30682 };
30683
30684-static struct ata_port_operations via_port_ops = {
30685+static const struct ata_port_operations via_port_ops = {
30686 .inherits = &ata_bmdma_port_ops,
30687 .cable_detect = via_cable_detect,
30688 .set_piomode = via_set_piomode,
30689@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
30690 .port_start = via_port_start,
30691 };
30692
30693-static struct ata_port_operations via_port_ops_noirq = {
30694+static const struct ata_port_operations via_port_ops_noirq = {
30695 .inherits = &via_port_ops,
30696 .sff_data_xfer = ata_sff_data_xfer_noirq,
30697 };
30698diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
30699index 6d8619b..ad511c4 100644
30700--- a/drivers/ata/pata_winbond.c
30701+++ b/drivers/ata/pata_winbond.c
30702@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
30703 ATA_PIO_SHT(DRV_NAME),
30704 };
30705
30706-static struct ata_port_operations winbond_port_ops = {
30707+static const struct ata_port_operations winbond_port_ops = {
30708 .inherits = &ata_sff_port_ops,
30709 .sff_data_xfer = winbond_data_xfer,
30710 .cable_detect = ata_cable_40wire,
30711diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
30712index 6c65b07..f996ec7 100644
30713--- a/drivers/ata/pdc_adma.c
30714+++ b/drivers/ata/pdc_adma.c
30715@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
30716 .dma_boundary = ADMA_DMA_BOUNDARY,
30717 };
30718
30719-static struct ata_port_operations adma_ata_ops = {
30720+static const struct ata_port_operations adma_ata_ops = {
30721 .inherits = &ata_sff_port_ops,
30722
30723 .lost_interrupt = ATA_OP_NULL,
30724diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
30725index 172b57e..c49bc1e 100644
30726--- a/drivers/ata/sata_fsl.c
30727+++ b/drivers/ata/sata_fsl.c
30728@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
30729 .dma_boundary = ATA_DMA_BOUNDARY,
30730 };
30731
30732-static struct ata_port_operations sata_fsl_ops = {
30733+static const struct ata_port_operations sata_fsl_ops = {
30734 .inherits = &sata_pmp_port_ops,
30735
30736 .qc_defer = ata_std_qc_defer,
30737diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
30738index 4406902..60603ef 100644
30739--- a/drivers/ata/sata_inic162x.c
30740+++ b/drivers/ata/sata_inic162x.c
30741@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
30742 return 0;
30743 }
30744
30745-static struct ata_port_operations inic_port_ops = {
30746+static const struct ata_port_operations inic_port_ops = {
30747 .inherits = &sata_port_ops,
30748
30749 .check_atapi_dma = inic_check_atapi_dma,
30750diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
30751index cf41126..8107be6 100644
30752--- a/drivers/ata/sata_mv.c
30753+++ b/drivers/ata/sata_mv.c
30754@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
30755 .dma_boundary = MV_DMA_BOUNDARY,
30756 };
30757
30758-static struct ata_port_operations mv5_ops = {
30759+static const struct ata_port_operations mv5_ops = {
30760 .inherits = &ata_sff_port_ops,
30761
30762 .lost_interrupt = ATA_OP_NULL,
30763@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
30764 .port_stop = mv_port_stop,
30765 };
30766
30767-static struct ata_port_operations mv6_ops = {
30768+static const struct ata_port_operations mv6_ops = {
30769 .inherits = &mv5_ops,
30770 .dev_config = mv6_dev_config,
30771 .scr_read = mv_scr_read,
30772@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
30773 .bmdma_status = mv_bmdma_status,
30774 };
30775
30776-static struct ata_port_operations mv_iie_ops = {
30777+static const struct ata_port_operations mv_iie_ops = {
30778 .inherits = &mv6_ops,
30779 .dev_config = ATA_OP_NULL,
30780 .qc_prep = mv_qc_prep_iie,
30781diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
30782index ae2297c..d5c9c33 100644
30783--- a/drivers/ata/sata_nv.c
30784+++ b/drivers/ata/sata_nv.c
30785@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
30786 * cases. Define nv_hardreset() which only kicks in for post-boot
30787 * probing and use it for all variants.
30788 */
30789-static struct ata_port_operations nv_generic_ops = {
30790+static const struct ata_port_operations nv_generic_ops = {
30791 .inherits = &ata_bmdma_port_ops,
30792 .lost_interrupt = ATA_OP_NULL,
30793 .scr_read = nv_scr_read,
30794@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
30795 .hardreset = nv_hardreset,
30796 };
30797
30798-static struct ata_port_operations nv_nf2_ops = {
30799+static const struct ata_port_operations nv_nf2_ops = {
30800 .inherits = &nv_generic_ops,
30801 .freeze = nv_nf2_freeze,
30802 .thaw = nv_nf2_thaw,
30803 };
30804
30805-static struct ata_port_operations nv_ck804_ops = {
30806+static const struct ata_port_operations nv_ck804_ops = {
30807 .inherits = &nv_generic_ops,
30808 .freeze = nv_ck804_freeze,
30809 .thaw = nv_ck804_thaw,
30810 .host_stop = nv_ck804_host_stop,
30811 };
30812
30813-static struct ata_port_operations nv_adma_ops = {
30814+static const struct ata_port_operations nv_adma_ops = {
30815 .inherits = &nv_ck804_ops,
30816
30817 .check_atapi_dma = nv_adma_check_atapi_dma,
30818@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
30819 .host_stop = nv_adma_host_stop,
30820 };
30821
30822-static struct ata_port_operations nv_swncq_ops = {
30823+static const struct ata_port_operations nv_swncq_ops = {
30824 .inherits = &nv_generic_ops,
30825
30826 .qc_defer = ata_std_qc_defer,
30827diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
30828index 07d8d00..6cc70bb 100644
30829--- a/drivers/ata/sata_promise.c
30830+++ b/drivers/ata/sata_promise.c
30831@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
30832 .error_handler = pdc_error_handler,
30833 };
30834
30835-static struct ata_port_operations pdc_sata_ops = {
30836+static const struct ata_port_operations pdc_sata_ops = {
30837 .inherits = &pdc_common_ops,
30838 .cable_detect = pdc_sata_cable_detect,
30839 .freeze = pdc_sata_freeze,
30840@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
30841
30842 /* First-generation chips need a more restrictive ->check_atapi_dma op,
30843 and ->freeze/thaw that ignore the hotplug controls. */
30844-static struct ata_port_operations pdc_old_sata_ops = {
30845+static const struct ata_port_operations pdc_old_sata_ops = {
30846 .inherits = &pdc_sata_ops,
30847 .freeze = pdc_freeze,
30848 .thaw = pdc_thaw,
30849 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
30850 };
30851
30852-static struct ata_port_operations pdc_pata_ops = {
30853+static const struct ata_port_operations pdc_pata_ops = {
30854 .inherits = &pdc_common_ops,
30855 .cable_detect = pdc_pata_cable_detect,
30856 .freeze = pdc_freeze,
30857diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
30858index 326c0cf..36ecebe 100644
30859--- a/drivers/ata/sata_qstor.c
30860+++ b/drivers/ata/sata_qstor.c
30861@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
30862 .dma_boundary = QS_DMA_BOUNDARY,
30863 };
30864
30865-static struct ata_port_operations qs_ata_ops = {
30866+static const struct ata_port_operations qs_ata_ops = {
30867 .inherits = &ata_sff_port_ops,
30868
30869 .check_atapi_dma = qs_check_atapi_dma,
30870diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
30871index 3cb69d5..0871d3c 100644
30872--- a/drivers/ata/sata_sil.c
30873+++ b/drivers/ata/sata_sil.c
30874@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
30875 .sg_tablesize = ATA_MAX_PRD
30876 };
30877
30878-static struct ata_port_operations sil_ops = {
30879+static const struct ata_port_operations sil_ops = {
30880 .inherits = &ata_bmdma32_port_ops,
30881 .dev_config = sil_dev_config,
30882 .set_mode = sil_set_mode,
30883diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
30884index e6946fc..eddb794 100644
30885--- a/drivers/ata/sata_sil24.c
30886+++ b/drivers/ata/sata_sil24.c
30887@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
30888 .dma_boundary = ATA_DMA_BOUNDARY,
30889 };
30890
30891-static struct ata_port_operations sil24_ops = {
30892+static const struct ata_port_operations sil24_ops = {
30893 .inherits = &sata_pmp_port_ops,
30894
30895 .qc_defer = sil24_qc_defer,
30896diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
30897index f8a91bf..9cb06b6 100644
30898--- a/drivers/ata/sata_sis.c
30899+++ b/drivers/ata/sata_sis.c
30900@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
30901 ATA_BMDMA_SHT(DRV_NAME),
30902 };
30903
30904-static struct ata_port_operations sis_ops = {
30905+static const struct ata_port_operations sis_ops = {
30906 .inherits = &ata_bmdma_port_ops,
30907 .scr_read = sis_scr_read,
30908 .scr_write = sis_scr_write,
30909diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
30910index 7257f2d..d04c6f5 100644
30911--- a/drivers/ata/sata_svw.c
30912+++ b/drivers/ata/sata_svw.c
30913@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
30914 };
30915
30916
30917-static struct ata_port_operations k2_sata_ops = {
30918+static const struct ata_port_operations k2_sata_ops = {
30919 .inherits = &ata_bmdma_port_ops,
30920 .sff_tf_load = k2_sata_tf_load,
30921 .sff_tf_read = k2_sata_tf_read,
30922diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
30923index bbcf970..cd0df0d 100644
30924--- a/drivers/ata/sata_sx4.c
30925+++ b/drivers/ata/sata_sx4.c
30926@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
30927 };
30928
30929 /* TODO: inherit from base port_ops after converting to new EH */
30930-static struct ata_port_operations pdc_20621_ops = {
30931+static const struct ata_port_operations pdc_20621_ops = {
30932 .inherits = &ata_sff_port_ops,
30933
30934 .check_atapi_dma = pdc_check_atapi_dma,
30935diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
30936index e5bff47..089d859 100644
30937--- a/drivers/ata/sata_uli.c
30938+++ b/drivers/ata/sata_uli.c
30939@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
30940 ATA_BMDMA_SHT(DRV_NAME),
30941 };
30942
30943-static struct ata_port_operations uli_ops = {
30944+static const struct ata_port_operations uli_ops = {
30945 .inherits = &ata_bmdma_port_ops,
30946 .scr_read = uli_scr_read,
30947 .scr_write = uli_scr_write,
30948diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
30949index f5dcca7..77b94eb 100644
30950--- a/drivers/ata/sata_via.c
30951+++ b/drivers/ata/sata_via.c
30952@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
30953 ATA_BMDMA_SHT(DRV_NAME),
30954 };
30955
30956-static struct ata_port_operations svia_base_ops = {
30957+static const struct ata_port_operations svia_base_ops = {
30958 .inherits = &ata_bmdma_port_ops,
30959 .sff_tf_load = svia_tf_load,
30960 };
30961
30962-static struct ata_port_operations vt6420_sata_ops = {
30963+static const struct ata_port_operations vt6420_sata_ops = {
30964 .inherits = &svia_base_ops,
30965 .freeze = svia_noop_freeze,
30966 .prereset = vt6420_prereset,
30967 .bmdma_start = vt6420_bmdma_start,
30968 };
30969
30970-static struct ata_port_operations vt6421_pata_ops = {
30971+static const struct ata_port_operations vt6421_pata_ops = {
30972 .inherits = &svia_base_ops,
30973 .cable_detect = vt6421_pata_cable_detect,
30974 .set_piomode = vt6421_set_pio_mode,
30975 .set_dmamode = vt6421_set_dma_mode,
30976 };
30977
30978-static struct ata_port_operations vt6421_sata_ops = {
30979+static const struct ata_port_operations vt6421_sata_ops = {
30980 .inherits = &svia_base_ops,
30981 .scr_read = svia_scr_read,
30982 .scr_write = svia_scr_write,
30983 };
30984
30985-static struct ata_port_operations vt8251_ops = {
30986+static const struct ata_port_operations vt8251_ops = {
30987 .inherits = &svia_base_ops,
30988 .hardreset = sata_std_hardreset,
30989 .scr_read = vt8251_scr_read,
30990diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
30991index 8b2a278..51e65d3 100644
30992--- a/drivers/ata/sata_vsc.c
30993+++ b/drivers/ata/sata_vsc.c
30994@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
30995 };
30996
30997
30998-static struct ata_port_operations vsc_sata_ops = {
30999+static const struct ata_port_operations vsc_sata_ops = {
31000 .inherits = &ata_bmdma_port_ops,
31001 /* The IRQ handling is not quite standard SFF behaviour so we
31002 cannot use the default lost interrupt handler */
31003diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31004index 5effec6..7e4019a 100644
31005--- a/drivers/atm/adummy.c
31006+++ b/drivers/atm/adummy.c
31007@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31008 vcc->pop(vcc, skb);
31009 else
31010 dev_kfree_skb_any(skb);
31011- atomic_inc(&vcc->stats->tx);
31012+ atomic_inc_unchecked(&vcc->stats->tx);
31013
31014 return 0;
31015 }
31016diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31017index 66e1813..26a27c6 100644
31018--- a/drivers/atm/ambassador.c
31019+++ b/drivers/atm/ambassador.c
31020@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31021 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31022
31023 // VC layer stats
31024- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31025+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31026
31027 // free the descriptor
31028 kfree (tx_descr);
31029@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31030 dump_skb ("<<<", vc, skb);
31031
31032 // VC layer stats
31033- atomic_inc(&atm_vcc->stats->rx);
31034+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31035 __net_timestamp(skb);
31036 // end of our responsability
31037 atm_vcc->push (atm_vcc, skb);
31038@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31039 } else {
31040 PRINTK (KERN_INFO, "dropped over-size frame");
31041 // should we count this?
31042- atomic_inc(&atm_vcc->stats->rx_drop);
31043+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31044 }
31045
31046 } else {
31047@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31048 }
31049
31050 if (check_area (skb->data, skb->len)) {
31051- atomic_inc(&atm_vcc->stats->tx_err);
31052+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31053 return -ENOMEM; // ?
31054 }
31055
31056diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31057index 02ad83d..6daffeb 100644
31058--- a/drivers/atm/atmtcp.c
31059+++ b/drivers/atm/atmtcp.c
31060@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31061 if (vcc->pop) vcc->pop(vcc,skb);
31062 else dev_kfree_skb(skb);
31063 if (dev_data) return 0;
31064- atomic_inc(&vcc->stats->tx_err);
31065+ atomic_inc_unchecked(&vcc->stats->tx_err);
31066 return -ENOLINK;
31067 }
31068 size = skb->len+sizeof(struct atmtcp_hdr);
31069@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31070 if (!new_skb) {
31071 if (vcc->pop) vcc->pop(vcc,skb);
31072 else dev_kfree_skb(skb);
31073- atomic_inc(&vcc->stats->tx_err);
31074+ atomic_inc_unchecked(&vcc->stats->tx_err);
31075 return -ENOBUFS;
31076 }
31077 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31078@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31079 if (vcc->pop) vcc->pop(vcc,skb);
31080 else dev_kfree_skb(skb);
31081 out_vcc->push(out_vcc,new_skb);
31082- atomic_inc(&vcc->stats->tx);
31083- atomic_inc(&out_vcc->stats->rx);
31084+ atomic_inc_unchecked(&vcc->stats->tx);
31085+ atomic_inc_unchecked(&out_vcc->stats->rx);
31086 return 0;
31087 }
31088
31089@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31090 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31091 read_unlock(&vcc_sklist_lock);
31092 if (!out_vcc) {
31093- atomic_inc(&vcc->stats->tx_err);
31094+ atomic_inc_unchecked(&vcc->stats->tx_err);
31095 goto done;
31096 }
31097 skb_pull(skb,sizeof(struct atmtcp_hdr));
31098@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31099 __net_timestamp(new_skb);
31100 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31101 out_vcc->push(out_vcc,new_skb);
31102- atomic_inc(&vcc->stats->tx);
31103- atomic_inc(&out_vcc->stats->rx);
31104+ atomic_inc_unchecked(&vcc->stats->tx);
31105+ atomic_inc_unchecked(&out_vcc->stats->rx);
31106 done:
31107 if (vcc->pop) vcc->pop(vcc,skb);
31108 else dev_kfree_skb(skb);
31109diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31110index 0c30261..3da356e 100644
31111--- a/drivers/atm/eni.c
31112+++ b/drivers/atm/eni.c
31113@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31114 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31115 vcc->dev->number);
31116 length = 0;
31117- atomic_inc(&vcc->stats->rx_err);
31118+ atomic_inc_unchecked(&vcc->stats->rx_err);
31119 }
31120 else {
31121 length = ATM_CELL_SIZE-1; /* no HEC */
31122@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31123 size);
31124 }
31125 eff = length = 0;
31126- atomic_inc(&vcc->stats->rx_err);
31127+ atomic_inc_unchecked(&vcc->stats->rx_err);
31128 }
31129 else {
31130 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31131@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31132 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31133 vcc->dev->number,vcc->vci,length,size << 2,descr);
31134 length = eff = 0;
31135- atomic_inc(&vcc->stats->rx_err);
31136+ atomic_inc_unchecked(&vcc->stats->rx_err);
31137 }
31138 }
31139 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31140@@ -770,7 +770,7 @@ rx_dequeued++;
31141 vcc->push(vcc,skb);
31142 pushed++;
31143 }
31144- atomic_inc(&vcc->stats->rx);
31145+ atomic_inc_unchecked(&vcc->stats->rx);
31146 }
31147 wake_up(&eni_dev->rx_wait);
31148 }
31149@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31150 PCI_DMA_TODEVICE);
31151 if (vcc->pop) vcc->pop(vcc,skb);
31152 else dev_kfree_skb_irq(skb);
31153- atomic_inc(&vcc->stats->tx);
31154+ atomic_inc_unchecked(&vcc->stats->tx);
31155 wake_up(&eni_dev->tx_wait);
31156 dma_complete++;
31157 }
31158@@ -1570,7 +1570,7 @@ tx_complete++;
31159 /*--------------------------------- entries ---------------------------------*/
31160
31161
31162-static const char *media_name[] __devinitdata = {
31163+static const char *media_name[] __devinitconst = {
31164 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
31165 "UTP", "05?", "06?", "07?", /* 4- 7 */
31166 "TAXI","09?", "10?", "11?", /* 8-11 */
31167diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31168index cd5049a..a51209f 100644
31169--- a/drivers/atm/firestream.c
31170+++ b/drivers/atm/firestream.c
31171@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31172 }
31173 }
31174
31175- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31176+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31177
31178 fs_dprintk (FS_DEBUG_TXMEM, "i");
31179 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31180@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31181 #endif
31182 skb_put (skb, qe->p1 & 0xffff);
31183 ATM_SKB(skb)->vcc = atm_vcc;
31184- atomic_inc(&atm_vcc->stats->rx);
31185+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31186 __net_timestamp(skb);
31187 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31188 atm_vcc->push (atm_vcc, skb);
31189@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31190 kfree (pe);
31191 }
31192 if (atm_vcc)
31193- atomic_inc(&atm_vcc->stats->rx_drop);
31194+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31195 break;
31196 case 0x1f: /* Reassembly abort: no buffers. */
31197 /* Silently increment error counter. */
31198 if (atm_vcc)
31199- atomic_inc(&atm_vcc->stats->rx_drop);
31200+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31201 break;
31202 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31203 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31204diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31205index f766cc4..a34002e 100644
31206--- a/drivers/atm/fore200e.c
31207+++ b/drivers/atm/fore200e.c
31208@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31209 #endif
31210 /* check error condition */
31211 if (*entry->status & STATUS_ERROR)
31212- atomic_inc(&vcc->stats->tx_err);
31213+ atomic_inc_unchecked(&vcc->stats->tx_err);
31214 else
31215- atomic_inc(&vcc->stats->tx);
31216+ atomic_inc_unchecked(&vcc->stats->tx);
31217 }
31218 }
31219
31220@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31221 if (skb == NULL) {
31222 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31223
31224- atomic_inc(&vcc->stats->rx_drop);
31225+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31226 return -ENOMEM;
31227 }
31228
31229@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31230
31231 dev_kfree_skb_any(skb);
31232
31233- atomic_inc(&vcc->stats->rx_drop);
31234+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31235 return -ENOMEM;
31236 }
31237
31238 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31239
31240 vcc->push(vcc, skb);
31241- atomic_inc(&vcc->stats->rx);
31242+ atomic_inc_unchecked(&vcc->stats->rx);
31243
31244 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31245
31246@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31247 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31248 fore200e->atm_dev->number,
31249 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31250- atomic_inc(&vcc->stats->rx_err);
31251+ atomic_inc_unchecked(&vcc->stats->rx_err);
31252 }
31253 }
31254
31255@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31256 goto retry_here;
31257 }
31258
31259- atomic_inc(&vcc->stats->tx_err);
31260+ atomic_inc_unchecked(&vcc->stats->tx_err);
31261
31262 fore200e->tx_sat++;
31263 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31264diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31265index 7066703..2b130de 100644
31266--- a/drivers/atm/he.c
31267+++ b/drivers/atm/he.c
31268@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31269
31270 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31271 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31272- atomic_inc(&vcc->stats->rx_drop);
31273+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31274 goto return_host_buffers;
31275 }
31276
31277@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31278 RBRQ_LEN_ERR(he_dev->rbrq_head)
31279 ? "LEN_ERR" : "",
31280 vcc->vpi, vcc->vci);
31281- atomic_inc(&vcc->stats->rx_err);
31282+ atomic_inc_unchecked(&vcc->stats->rx_err);
31283 goto return_host_buffers;
31284 }
31285
31286@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31287 vcc->push(vcc, skb);
31288 spin_lock(&he_dev->global_lock);
31289
31290- atomic_inc(&vcc->stats->rx);
31291+ atomic_inc_unchecked(&vcc->stats->rx);
31292
31293 return_host_buffers:
31294 ++pdus_assembled;
31295@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31296 tpd->vcc->pop(tpd->vcc, tpd->skb);
31297 else
31298 dev_kfree_skb_any(tpd->skb);
31299- atomic_inc(&tpd->vcc->stats->tx_err);
31300+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31301 }
31302 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31303 return;
31304@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31305 vcc->pop(vcc, skb);
31306 else
31307 dev_kfree_skb_any(skb);
31308- atomic_inc(&vcc->stats->tx_err);
31309+ atomic_inc_unchecked(&vcc->stats->tx_err);
31310 return -EINVAL;
31311 }
31312
31313@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31314 vcc->pop(vcc, skb);
31315 else
31316 dev_kfree_skb_any(skb);
31317- atomic_inc(&vcc->stats->tx_err);
31318+ atomic_inc_unchecked(&vcc->stats->tx_err);
31319 return -EINVAL;
31320 }
31321 #endif
31322@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31323 vcc->pop(vcc, skb);
31324 else
31325 dev_kfree_skb_any(skb);
31326- atomic_inc(&vcc->stats->tx_err);
31327+ atomic_inc_unchecked(&vcc->stats->tx_err);
31328 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31329 return -ENOMEM;
31330 }
31331@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31332 vcc->pop(vcc, skb);
31333 else
31334 dev_kfree_skb_any(skb);
31335- atomic_inc(&vcc->stats->tx_err);
31336+ atomic_inc_unchecked(&vcc->stats->tx_err);
31337 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31338 return -ENOMEM;
31339 }
31340@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31341 __enqueue_tpd(he_dev, tpd, cid);
31342 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31343
31344- atomic_inc(&vcc->stats->tx);
31345+ atomic_inc_unchecked(&vcc->stats->tx);
31346
31347 return 0;
31348 }
31349diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31350index 4e49021..01b1512 100644
31351--- a/drivers/atm/horizon.c
31352+++ b/drivers/atm/horizon.c
31353@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31354 {
31355 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31356 // VC layer stats
31357- atomic_inc(&vcc->stats->rx);
31358+ atomic_inc_unchecked(&vcc->stats->rx);
31359 __net_timestamp(skb);
31360 // end of our responsability
31361 vcc->push (vcc, skb);
31362@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31363 dev->tx_iovec = NULL;
31364
31365 // VC layer stats
31366- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31367+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31368
31369 // free the skb
31370 hrz_kfree_skb (skb);
31371diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31372index e33ae00..9deb4ab 100644
31373--- a/drivers/atm/idt77252.c
31374+++ b/drivers/atm/idt77252.c
31375@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31376 else
31377 dev_kfree_skb(skb);
31378
31379- atomic_inc(&vcc->stats->tx);
31380+ atomic_inc_unchecked(&vcc->stats->tx);
31381 }
31382
31383 atomic_dec(&scq->used);
31384@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31385 if ((sb = dev_alloc_skb(64)) == NULL) {
31386 printk("%s: Can't allocate buffers for aal0.\n",
31387 card->name);
31388- atomic_add(i, &vcc->stats->rx_drop);
31389+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
31390 break;
31391 }
31392 if (!atm_charge(vcc, sb->truesize)) {
31393 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31394 card->name);
31395- atomic_add(i - 1, &vcc->stats->rx_drop);
31396+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31397 dev_kfree_skb(sb);
31398 break;
31399 }
31400@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31401 ATM_SKB(sb)->vcc = vcc;
31402 __net_timestamp(sb);
31403 vcc->push(vcc, sb);
31404- atomic_inc(&vcc->stats->rx);
31405+ atomic_inc_unchecked(&vcc->stats->rx);
31406
31407 cell += ATM_CELL_PAYLOAD;
31408 }
31409@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31410 "(CDC: %08x)\n",
31411 card->name, len, rpp->len, readl(SAR_REG_CDC));
31412 recycle_rx_pool_skb(card, rpp);
31413- atomic_inc(&vcc->stats->rx_err);
31414+ atomic_inc_unchecked(&vcc->stats->rx_err);
31415 return;
31416 }
31417 if (stat & SAR_RSQE_CRC) {
31418 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31419 recycle_rx_pool_skb(card, rpp);
31420- atomic_inc(&vcc->stats->rx_err);
31421+ atomic_inc_unchecked(&vcc->stats->rx_err);
31422 return;
31423 }
31424 if (skb_queue_len(&rpp->queue) > 1) {
31425@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31426 RXPRINTK("%s: Can't alloc RX skb.\n",
31427 card->name);
31428 recycle_rx_pool_skb(card, rpp);
31429- atomic_inc(&vcc->stats->rx_err);
31430+ atomic_inc_unchecked(&vcc->stats->rx_err);
31431 return;
31432 }
31433 if (!atm_charge(vcc, skb->truesize)) {
31434@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31435 __net_timestamp(skb);
31436
31437 vcc->push(vcc, skb);
31438- atomic_inc(&vcc->stats->rx);
31439+ atomic_inc_unchecked(&vcc->stats->rx);
31440
31441 return;
31442 }
31443@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31444 __net_timestamp(skb);
31445
31446 vcc->push(vcc, skb);
31447- atomic_inc(&vcc->stats->rx);
31448+ atomic_inc_unchecked(&vcc->stats->rx);
31449
31450 if (skb->truesize > SAR_FB_SIZE_3)
31451 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31452@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31453 if (vcc->qos.aal != ATM_AAL0) {
31454 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
31455 card->name, vpi, vci);
31456- atomic_inc(&vcc->stats->rx_drop);
31457+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31458 goto drop;
31459 }
31460
31461 if ((sb = dev_alloc_skb(64)) == NULL) {
31462 printk("%s: Can't allocate buffers for AAL0.\n",
31463 card->name);
31464- atomic_inc(&vcc->stats->rx_err);
31465+ atomic_inc_unchecked(&vcc->stats->rx_err);
31466 goto drop;
31467 }
31468
31469@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
31470 ATM_SKB(sb)->vcc = vcc;
31471 __net_timestamp(sb);
31472 vcc->push(vcc, sb);
31473- atomic_inc(&vcc->stats->rx);
31474+ atomic_inc_unchecked(&vcc->stats->rx);
31475
31476 drop:
31477 skb_pull(queue, 64);
31478@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31479
31480 if (vc == NULL) {
31481 printk("%s: NULL connection in send().\n", card->name);
31482- atomic_inc(&vcc->stats->tx_err);
31483+ atomic_inc_unchecked(&vcc->stats->tx_err);
31484 dev_kfree_skb(skb);
31485 return -EINVAL;
31486 }
31487 if (!test_bit(VCF_TX, &vc->flags)) {
31488 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
31489- atomic_inc(&vcc->stats->tx_err);
31490+ atomic_inc_unchecked(&vcc->stats->tx_err);
31491 dev_kfree_skb(skb);
31492 return -EINVAL;
31493 }
31494@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31495 break;
31496 default:
31497 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
31498- atomic_inc(&vcc->stats->tx_err);
31499+ atomic_inc_unchecked(&vcc->stats->tx_err);
31500 dev_kfree_skb(skb);
31501 return -EINVAL;
31502 }
31503
31504 if (skb_shinfo(skb)->nr_frags != 0) {
31505 printk("%s: No scatter-gather yet.\n", card->name);
31506- atomic_inc(&vcc->stats->tx_err);
31507+ atomic_inc_unchecked(&vcc->stats->tx_err);
31508 dev_kfree_skb(skb);
31509 return -EINVAL;
31510 }
31511@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31512
31513 err = queue_skb(card, vc, skb, oam);
31514 if (err) {
31515- atomic_inc(&vcc->stats->tx_err);
31516+ atomic_inc_unchecked(&vcc->stats->tx_err);
31517 dev_kfree_skb(skb);
31518 return err;
31519 }
31520@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
31521 skb = dev_alloc_skb(64);
31522 if (!skb) {
31523 printk("%s: Out of memory in send_oam().\n", card->name);
31524- atomic_inc(&vcc->stats->tx_err);
31525+ atomic_inc_unchecked(&vcc->stats->tx_err);
31526 return -ENOMEM;
31527 }
31528 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
31529diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
31530index b2c1b37..faa672b 100644
31531--- a/drivers/atm/iphase.c
31532+++ b/drivers/atm/iphase.c
31533@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
31534 status = (u_short) (buf_desc_ptr->desc_mode);
31535 if (status & (RX_CER | RX_PTE | RX_OFL))
31536 {
31537- atomic_inc(&vcc->stats->rx_err);
31538+ atomic_inc_unchecked(&vcc->stats->rx_err);
31539 IF_ERR(printk("IA: bad packet, dropping it");)
31540 if (status & RX_CER) {
31541 IF_ERR(printk(" cause: packet CRC error\n");)
31542@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
31543 len = dma_addr - buf_addr;
31544 if (len > iadev->rx_buf_sz) {
31545 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
31546- atomic_inc(&vcc->stats->rx_err);
31547+ atomic_inc_unchecked(&vcc->stats->rx_err);
31548 goto out_free_desc;
31549 }
31550
31551@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31552 ia_vcc = INPH_IA_VCC(vcc);
31553 if (ia_vcc == NULL)
31554 {
31555- atomic_inc(&vcc->stats->rx_err);
31556+ atomic_inc_unchecked(&vcc->stats->rx_err);
31557 dev_kfree_skb_any(skb);
31558 atm_return(vcc, atm_guess_pdu2truesize(len));
31559 goto INCR_DLE;
31560@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31561 if ((length > iadev->rx_buf_sz) || (length >
31562 (skb->len - sizeof(struct cpcs_trailer))))
31563 {
31564- atomic_inc(&vcc->stats->rx_err);
31565+ atomic_inc_unchecked(&vcc->stats->rx_err);
31566 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
31567 length, skb->len);)
31568 dev_kfree_skb_any(skb);
31569@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31570
31571 IF_RX(printk("rx_dle_intr: skb push");)
31572 vcc->push(vcc,skb);
31573- atomic_inc(&vcc->stats->rx);
31574+ atomic_inc_unchecked(&vcc->stats->rx);
31575 iadev->rx_pkt_cnt++;
31576 }
31577 INCR_DLE:
31578@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
31579 {
31580 struct k_sonet_stats *stats;
31581 stats = &PRIV(_ia_dev[board])->sonet_stats;
31582- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
31583- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
31584- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
31585- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
31586- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
31587- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
31588- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
31589- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
31590- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
31591+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
31592+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
31593+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
31594+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
31595+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
31596+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
31597+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
31598+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
31599+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
31600 }
31601 ia_cmds.status = 0;
31602 break;
31603@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31604 if ((desc == 0) || (desc > iadev->num_tx_desc))
31605 {
31606 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
31607- atomic_inc(&vcc->stats->tx);
31608+ atomic_inc_unchecked(&vcc->stats->tx);
31609 if (vcc->pop)
31610 vcc->pop(vcc, skb);
31611 else
31612@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31613 ATM_DESC(skb) = vcc->vci;
31614 skb_queue_tail(&iadev->tx_dma_q, skb);
31615
31616- atomic_inc(&vcc->stats->tx);
31617+ atomic_inc_unchecked(&vcc->stats->tx);
31618 iadev->tx_pkt_cnt++;
31619 /* Increment transaction counter */
31620 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
31621
31622 #if 0
31623 /* add flow control logic */
31624- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
31625+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
31626 if (iavcc->vc_desc_cnt > 10) {
31627 vcc->tx_quota = vcc->tx_quota * 3 / 4;
31628 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
31629diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
31630index cf97c34..8d30655 100644
31631--- a/drivers/atm/lanai.c
31632+++ b/drivers/atm/lanai.c
31633@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
31634 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
31635 lanai_endtx(lanai, lvcc);
31636 lanai_free_skb(lvcc->tx.atmvcc, skb);
31637- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
31638+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
31639 }
31640
31641 /* Try to fill the buffer - don't call unless there is backlog */
31642@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
31643 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
31644 __net_timestamp(skb);
31645 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
31646- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
31647+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
31648 out:
31649 lvcc->rx.buf.ptr = end;
31650 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
31651@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31652 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
31653 "vcc %d\n", lanai->number, (unsigned int) s, vci);
31654 lanai->stats.service_rxnotaal5++;
31655- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31656+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31657 return 0;
31658 }
31659 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
31660@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31661 int bytes;
31662 read_unlock(&vcc_sklist_lock);
31663 DPRINTK("got trashed rx pdu on vci %d\n", vci);
31664- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31665+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31666 lvcc->stats.x.aal5.service_trash++;
31667 bytes = (SERVICE_GET_END(s) * 16) -
31668 (((unsigned long) lvcc->rx.buf.ptr) -
31669@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31670 }
31671 if (s & SERVICE_STREAM) {
31672 read_unlock(&vcc_sklist_lock);
31673- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31674+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31675 lvcc->stats.x.aal5.service_stream++;
31676 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
31677 "PDU on VCI %d!\n", lanai->number, vci);
31678@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31679 return 0;
31680 }
31681 DPRINTK("got rx crc error on vci %d\n", vci);
31682- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31683+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31684 lvcc->stats.x.aal5.service_rxcrc++;
31685 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
31686 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
31687diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
31688index 3da804b..d3b0eed 100644
31689--- a/drivers/atm/nicstar.c
31690+++ b/drivers/atm/nicstar.c
31691@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31692 if ((vc = (vc_map *) vcc->dev_data) == NULL)
31693 {
31694 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
31695- atomic_inc(&vcc->stats->tx_err);
31696+ atomic_inc_unchecked(&vcc->stats->tx_err);
31697 dev_kfree_skb_any(skb);
31698 return -EINVAL;
31699 }
31700@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31701 if (!vc->tx)
31702 {
31703 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
31704- atomic_inc(&vcc->stats->tx_err);
31705+ atomic_inc_unchecked(&vcc->stats->tx_err);
31706 dev_kfree_skb_any(skb);
31707 return -EINVAL;
31708 }
31709@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31710 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
31711 {
31712 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
31713- atomic_inc(&vcc->stats->tx_err);
31714+ atomic_inc_unchecked(&vcc->stats->tx_err);
31715 dev_kfree_skb_any(skb);
31716 return -EINVAL;
31717 }
31718@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31719 if (skb_shinfo(skb)->nr_frags != 0)
31720 {
31721 printk("nicstar%d: No scatter-gather yet.\n", card->index);
31722- atomic_inc(&vcc->stats->tx_err);
31723+ atomic_inc_unchecked(&vcc->stats->tx_err);
31724 dev_kfree_skb_any(skb);
31725 return -EINVAL;
31726 }
31727@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31728
31729 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
31730 {
31731- atomic_inc(&vcc->stats->tx_err);
31732+ atomic_inc_unchecked(&vcc->stats->tx_err);
31733 dev_kfree_skb_any(skb);
31734 return -EIO;
31735 }
31736- atomic_inc(&vcc->stats->tx);
31737+ atomic_inc_unchecked(&vcc->stats->tx);
31738
31739 return 0;
31740 }
31741@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31742 {
31743 printk("nicstar%d: Can't allocate buffers for aal0.\n",
31744 card->index);
31745- atomic_add(i,&vcc->stats->rx_drop);
31746+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
31747 break;
31748 }
31749 if (!atm_charge(vcc, sb->truesize))
31750 {
31751 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
31752 card->index);
31753- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31754+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31755 dev_kfree_skb_any(sb);
31756 break;
31757 }
31758@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31759 ATM_SKB(sb)->vcc = vcc;
31760 __net_timestamp(sb);
31761 vcc->push(vcc, sb);
31762- atomic_inc(&vcc->stats->rx);
31763+ atomic_inc_unchecked(&vcc->stats->rx);
31764 cell += ATM_CELL_PAYLOAD;
31765 }
31766
31767@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31768 if (iovb == NULL)
31769 {
31770 printk("nicstar%d: Out of iovec buffers.\n", card->index);
31771- atomic_inc(&vcc->stats->rx_drop);
31772+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31773 recycle_rx_buf(card, skb);
31774 return;
31775 }
31776@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31777 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
31778 {
31779 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
31780- atomic_inc(&vcc->stats->rx_err);
31781+ atomic_inc_unchecked(&vcc->stats->rx_err);
31782 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
31783 NS_SKB(iovb)->iovcnt = 0;
31784 iovb->len = 0;
31785@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31786 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
31787 card->index);
31788 which_list(card, skb);
31789- atomic_inc(&vcc->stats->rx_err);
31790+ atomic_inc_unchecked(&vcc->stats->rx_err);
31791 recycle_rx_buf(card, skb);
31792 vc->rx_iov = NULL;
31793 recycle_iov_buf(card, iovb);
31794@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31795 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
31796 card->index);
31797 which_list(card, skb);
31798- atomic_inc(&vcc->stats->rx_err);
31799+ atomic_inc_unchecked(&vcc->stats->rx_err);
31800 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31801 NS_SKB(iovb)->iovcnt);
31802 vc->rx_iov = NULL;
31803@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31804 printk(" - PDU size mismatch.\n");
31805 else
31806 printk(".\n");
31807- atomic_inc(&vcc->stats->rx_err);
31808+ atomic_inc_unchecked(&vcc->stats->rx_err);
31809 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31810 NS_SKB(iovb)->iovcnt);
31811 vc->rx_iov = NULL;
31812@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31813 if (!atm_charge(vcc, skb->truesize))
31814 {
31815 push_rxbufs(card, skb);
31816- atomic_inc(&vcc->stats->rx_drop);
31817+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31818 }
31819 else
31820 {
31821@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31822 ATM_SKB(skb)->vcc = vcc;
31823 __net_timestamp(skb);
31824 vcc->push(vcc, skb);
31825- atomic_inc(&vcc->stats->rx);
31826+ atomic_inc_unchecked(&vcc->stats->rx);
31827 }
31828 }
31829 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
31830@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31831 if (!atm_charge(vcc, sb->truesize))
31832 {
31833 push_rxbufs(card, sb);
31834- atomic_inc(&vcc->stats->rx_drop);
31835+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31836 }
31837 else
31838 {
31839@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31840 ATM_SKB(sb)->vcc = vcc;
31841 __net_timestamp(sb);
31842 vcc->push(vcc, sb);
31843- atomic_inc(&vcc->stats->rx);
31844+ atomic_inc_unchecked(&vcc->stats->rx);
31845 }
31846
31847 push_rxbufs(card, skb);
31848@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31849 if (!atm_charge(vcc, skb->truesize))
31850 {
31851 push_rxbufs(card, skb);
31852- atomic_inc(&vcc->stats->rx_drop);
31853+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31854 }
31855 else
31856 {
31857@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31858 ATM_SKB(skb)->vcc = vcc;
31859 __net_timestamp(skb);
31860 vcc->push(vcc, skb);
31861- atomic_inc(&vcc->stats->rx);
31862+ atomic_inc_unchecked(&vcc->stats->rx);
31863 }
31864
31865 push_rxbufs(card, sb);
31866@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31867 if (hb == NULL)
31868 {
31869 printk("nicstar%d: Out of huge buffers.\n", card->index);
31870- atomic_inc(&vcc->stats->rx_drop);
31871+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31872 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31873 NS_SKB(iovb)->iovcnt);
31874 vc->rx_iov = NULL;
31875@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31876 }
31877 else
31878 dev_kfree_skb_any(hb);
31879- atomic_inc(&vcc->stats->rx_drop);
31880+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31881 }
31882 else
31883 {
31884@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31885 #endif /* NS_USE_DESTRUCTORS */
31886 __net_timestamp(hb);
31887 vcc->push(vcc, hb);
31888- atomic_inc(&vcc->stats->rx);
31889+ atomic_inc_unchecked(&vcc->stats->rx);
31890 }
31891 }
31892
31893diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
31894index 84c93ff..e6ed269 100644
31895--- a/drivers/atm/solos-pci.c
31896+++ b/drivers/atm/solos-pci.c
31897@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
31898 }
31899 atm_charge(vcc, skb->truesize);
31900 vcc->push(vcc, skb);
31901- atomic_inc(&vcc->stats->rx);
31902+ atomic_inc_unchecked(&vcc->stats->rx);
31903 break;
31904
31905 case PKT_STATUS:
31906@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
31907 char msg[500];
31908 char item[10];
31909
31910+ pax_track_stack();
31911+
31912 len = buf->len;
31913 for (i = 0; i < len; i++){
31914 if(i % 8 == 0)
31915@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
31916 vcc = SKB_CB(oldskb)->vcc;
31917
31918 if (vcc) {
31919- atomic_inc(&vcc->stats->tx);
31920+ atomic_inc_unchecked(&vcc->stats->tx);
31921 solos_pop(vcc, oldskb);
31922 } else
31923 dev_kfree_skb_irq(oldskb);
31924diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
31925index 6dd3f59..ee377f3 100644
31926--- a/drivers/atm/suni.c
31927+++ b/drivers/atm/suni.c
31928@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
31929
31930
31931 #define ADD_LIMITED(s,v) \
31932- atomic_add((v),&stats->s); \
31933- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
31934+ atomic_add_unchecked((v),&stats->s); \
31935+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
31936
31937
31938 static void suni_hz(unsigned long from_timer)
31939diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
31940index fc8cb07..4a80e53 100644
31941--- a/drivers/atm/uPD98402.c
31942+++ b/drivers/atm/uPD98402.c
31943@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31944 struct sonet_stats tmp;
31945 int error = 0;
31946
31947- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31948+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31949 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31950 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31951 if (zero && !error) {
31952@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31953
31954
31955 #define ADD_LIMITED(s,v) \
31956- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31957- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31958- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31959+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31960+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31961+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31962
31963
31964 static void stat_event(struct atm_dev *dev)
31965@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
31966 if (reason & uPD98402_INT_PFM) stat_event(dev);
31967 if (reason & uPD98402_INT_PCO) {
31968 (void) GET(PCOCR); /* clear interrupt cause */
31969- atomic_add(GET(HECCT),
31970+ atomic_add_unchecked(GET(HECCT),
31971 &PRIV(dev)->sonet_stats.uncorr_hcs);
31972 }
31973 if ((reason & uPD98402_INT_RFO) &&
31974@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
31975 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
31976 uPD98402_INT_LOS),PIMR); /* enable them */
31977 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
31978- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31979- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
31980- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
31981+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31982+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
31983+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
31984 return 0;
31985 }
31986
31987diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
31988index 2e9635b..32927b4 100644
31989--- a/drivers/atm/zatm.c
31990+++ b/drivers/atm/zatm.c
31991@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31992 }
31993 if (!size) {
31994 dev_kfree_skb_irq(skb);
31995- if (vcc) atomic_inc(&vcc->stats->rx_err);
31996+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
31997 continue;
31998 }
31999 if (!atm_charge(vcc,skb->truesize)) {
32000@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32001 skb->len = size;
32002 ATM_SKB(skb)->vcc = vcc;
32003 vcc->push(vcc,skb);
32004- atomic_inc(&vcc->stats->rx);
32005+ atomic_inc_unchecked(&vcc->stats->rx);
32006 }
32007 zout(pos & 0xffff,MTA(mbx));
32008 #if 0 /* probably a stupid idea */
32009@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32010 skb_queue_head(&zatm_vcc->backlog,skb);
32011 break;
32012 }
32013- atomic_inc(&vcc->stats->tx);
32014+ atomic_inc_unchecked(&vcc->stats->tx);
32015 wake_up(&zatm_vcc->tx_wait);
32016 }
32017
32018diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32019index 63c143e..fece183 100644
32020--- a/drivers/base/bus.c
32021+++ b/drivers/base/bus.c
32022@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
32023 return ret;
32024 }
32025
32026-static struct sysfs_ops driver_sysfs_ops = {
32027+static const struct sysfs_ops driver_sysfs_ops = {
32028 .show = drv_attr_show,
32029 .store = drv_attr_store,
32030 };
32031@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
32032 return ret;
32033 }
32034
32035-static struct sysfs_ops bus_sysfs_ops = {
32036+static const struct sysfs_ops bus_sysfs_ops = {
32037 .show = bus_attr_show,
32038 .store = bus_attr_store,
32039 };
32040@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
32041 return 0;
32042 }
32043
32044-static struct kset_uevent_ops bus_uevent_ops = {
32045+static const struct kset_uevent_ops bus_uevent_ops = {
32046 .filter = bus_uevent_filter,
32047 };
32048
32049diff --git a/drivers/base/class.c b/drivers/base/class.c
32050index 6e2c3b0..cb61871 100644
32051--- a/drivers/base/class.c
32052+++ b/drivers/base/class.c
32053@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
32054 kfree(cp);
32055 }
32056
32057-static struct sysfs_ops class_sysfs_ops = {
32058+static const struct sysfs_ops class_sysfs_ops = {
32059 .show = class_attr_show,
32060 .store = class_attr_store,
32061 };
32062diff --git a/drivers/base/core.c b/drivers/base/core.c
32063index f33d768..a9358d0 100644
32064--- a/drivers/base/core.c
32065+++ b/drivers/base/core.c
32066@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
32067 return ret;
32068 }
32069
32070-static struct sysfs_ops dev_sysfs_ops = {
32071+static const struct sysfs_ops dev_sysfs_ops = {
32072 .show = dev_attr_show,
32073 .store = dev_attr_store,
32074 };
32075@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
32076 return retval;
32077 }
32078
32079-static struct kset_uevent_ops device_uevent_ops = {
32080+static const struct kset_uevent_ops device_uevent_ops = {
32081 .filter = dev_uevent_filter,
32082 .name = dev_uevent_name,
32083 .uevent = dev_uevent,
32084diff --git a/drivers/base/memory.c b/drivers/base/memory.c
32085index 989429c..2272b00 100644
32086--- a/drivers/base/memory.c
32087+++ b/drivers/base/memory.c
32088@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
32089 return retval;
32090 }
32091
32092-static struct kset_uevent_ops memory_uevent_ops = {
32093+static const struct kset_uevent_ops memory_uevent_ops = {
32094 .name = memory_uevent_name,
32095 .uevent = memory_uevent,
32096 };
32097diff --git a/drivers/base/sys.c b/drivers/base/sys.c
32098index 3f202f7..61c4a6f 100644
32099--- a/drivers/base/sys.c
32100+++ b/drivers/base/sys.c
32101@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
32102 return -EIO;
32103 }
32104
32105-static struct sysfs_ops sysfs_ops = {
32106+static const struct sysfs_ops sysfs_ops = {
32107 .show = sysdev_show,
32108 .store = sysdev_store,
32109 };
32110@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
32111 return -EIO;
32112 }
32113
32114-static struct sysfs_ops sysfs_class_ops = {
32115+static const struct sysfs_ops sysfs_class_ops = {
32116 .show = sysdev_class_show,
32117 .store = sysdev_class_store,
32118 };
32119diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
32120index eb4fa19..1954777 100644
32121--- a/drivers/block/DAC960.c
32122+++ b/drivers/block/DAC960.c
32123@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
32124 unsigned long flags;
32125 int Channel, TargetID;
32126
32127+ pax_track_stack();
32128+
32129 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
32130 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
32131 sizeof(DAC960_SCSI_Inquiry_T) +
32132diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32133index 68b90d9..7e2e3f3 100644
32134--- a/drivers/block/cciss.c
32135+++ b/drivers/block/cciss.c
32136@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32137 int err;
32138 u32 cp;
32139
32140+ memset(&arg64, 0, sizeof(arg64));
32141+
32142 err = 0;
32143 err |=
32144 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32145@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
32146 /* Wait (up to 20 seconds) for a command to complete */
32147
32148 for (i = 20 * HZ; i > 0; i--) {
32149- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32150+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32151 if (done == FIFO_EMPTY)
32152 schedule_timeout_uninterruptible(1);
32153 else
32154@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
32155 resend_cmd1:
32156
32157 /* Disable interrupt on the board. */
32158- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32159+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32160
32161 /* Make sure there is room in the command FIFO */
32162 /* Actually it should be completely empty at this time */
32163@@ -2884,13 +2886,13 @@ resend_cmd1:
32164 /* tape side of the driver. */
32165 for (i = 200000; i > 0; i--) {
32166 /* if fifo isn't full go */
32167- if (!(h->access.fifo_full(h)))
32168+ if (!(h->access->fifo_full(h)))
32169 break;
32170 udelay(10);
32171 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
32172 " waiting!\n", h->ctlr);
32173 }
32174- h->access.submit_command(h, c); /* Send the cmd */
32175+ h->access->submit_command(h, c); /* Send the cmd */
32176 do {
32177 complete = pollcomplete(h->ctlr);
32178
32179@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
32180 while (!hlist_empty(&h->reqQ)) {
32181 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
32182 /* can't do anything if fifo is full */
32183- if ((h->access.fifo_full(h))) {
32184+ if ((h->access->fifo_full(h))) {
32185 printk(KERN_WARNING "cciss: fifo full\n");
32186 break;
32187 }
32188@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
32189 h->Qdepth--;
32190
32191 /* Tell the controller execute command */
32192- h->access.submit_command(h, c);
32193+ h->access->submit_command(h, c);
32194
32195 /* Put job onto the completed Q */
32196 addQ(&h->cmpQ, c);
32197@@ -3393,17 +3395,17 @@ startio:
32198
32199 static inline unsigned long get_next_completion(ctlr_info_t *h)
32200 {
32201- return h->access.command_completed(h);
32202+ return h->access->command_completed(h);
32203 }
32204
32205 static inline int interrupt_pending(ctlr_info_t *h)
32206 {
32207- return h->access.intr_pending(h);
32208+ return h->access->intr_pending(h);
32209 }
32210
32211 static inline long interrupt_not_for_us(ctlr_info_t *h)
32212 {
32213- return (((h->access.intr_pending(h) == 0) ||
32214+ return (((h->access->intr_pending(h) == 0) ||
32215 (h->interrupts_enabled == 0)));
32216 }
32217
32218@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
32219 */
32220 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
32221 c->product_name = products[prod_index].product_name;
32222- c->access = *(products[prod_index].access);
32223+ c->access = products[prod_index].access;
32224 c->nr_cmds = c->max_commands - 4;
32225 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
32226 (readb(&c->cfgtable->Signature[1]) != 'I') ||
32227@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32228 }
32229
32230 /* make sure the board interrupts are off */
32231- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
32232+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
32233 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
32234 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
32235 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
32236@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32237 cciss_scsi_setup(i);
32238
32239 /* Turn the interrupts on so we can service requests */
32240- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
32241+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
32242
32243 /* Get the firmware version */
32244 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32245diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32246index 04d6bf8..36e712d 100644
32247--- a/drivers/block/cciss.h
32248+++ b/drivers/block/cciss.h
32249@@ -90,7 +90,7 @@ struct ctlr_info
32250 // information about each logical volume
32251 drive_info_struct *drv[CISS_MAX_LUN];
32252
32253- struct access_method access;
32254+ struct access_method *access;
32255
32256 /* queue and queue Info */
32257 struct hlist_head reqQ;
32258diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32259index 6422651..bb1bdef 100644
32260--- a/drivers/block/cpqarray.c
32261+++ b/drivers/block/cpqarray.c
32262@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32263 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32264 goto Enomem4;
32265 }
32266- hba[i]->access.set_intr_mask(hba[i], 0);
32267+ hba[i]->access->set_intr_mask(hba[i], 0);
32268 if (request_irq(hba[i]->intr, do_ida_intr,
32269 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32270 {
32271@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32272 add_timer(&hba[i]->timer);
32273
32274 /* Enable IRQ now that spinlock and rate limit timer are set up */
32275- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32276+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32277
32278 for(j=0; j<NWD; j++) {
32279 struct gendisk *disk = ida_gendisk[i][j];
32280@@ -695,7 +695,7 @@ DBGINFO(
32281 for(i=0; i<NR_PRODUCTS; i++) {
32282 if (board_id == products[i].board_id) {
32283 c->product_name = products[i].product_name;
32284- c->access = *(products[i].access);
32285+ c->access = products[i].access;
32286 break;
32287 }
32288 }
32289@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
32290 hba[ctlr]->intr = intr;
32291 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32292 hba[ctlr]->product_name = products[j].product_name;
32293- hba[ctlr]->access = *(products[j].access);
32294+ hba[ctlr]->access = products[j].access;
32295 hba[ctlr]->ctlr = ctlr;
32296 hba[ctlr]->board_id = board_id;
32297 hba[ctlr]->pci_dev = NULL; /* not PCI */
32298@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
32299 struct scatterlist tmp_sg[SG_MAX];
32300 int i, dir, seg;
32301
32302+ pax_track_stack();
32303+
32304 if (blk_queue_plugged(q))
32305 goto startio;
32306
32307@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
32308
32309 while((c = h->reqQ) != NULL) {
32310 /* Can't do anything if we're busy */
32311- if (h->access.fifo_full(h) == 0)
32312+ if (h->access->fifo_full(h) == 0)
32313 return;
32314
32315 /* Get the first entry from the request Q */
32316@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
32317 h->Qdepth--;
32318
32319 /* Tell the controller to do our bidding */
32320- h->access.submit_command(h, c);
32321+ h->access->submit_command(h, c);
32322
32323 /* Get onto the completion Q */
32324 addQ(&h->cmpQ, c);
32325@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32326 unsigned long flags;
32327 __u32 a,a1;
32328
32329- istat = h->access.intr_pending(h);
32330+ istat = h->access->intr_pending(h);
32331 /* Is this interrupt for us? */
32332 if (istat == 0)
32333 return IRQ_NONE;
32334@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32335 */
32336 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32337 if (istat & FIFO_NOT_EMPTY) {
32338- while((a = h->access.command_completed(h))) {
32339+ while((a = h->access->command_completed(h))) {
32340 a1 = a; a &= ~3;
32341 if ((c = h->cmpQ) == NULL)
32342 {
32343@@ -1434,11 +1436,11 @@ static int sendcmd(
32344 /*
32345 * Disable interrupt
32346 */
32347- info_p->access.set_intr_mask(info_p, 0);
32348+ info_p->access->set_intr_mask(info_p, 0);
32349 /* Make sure there is room in the command FIFO */
32350 /* Actually it should be completely empty at this time. */
32351 for (i = 200000; i > 0; i--) {
32352- temp = info_p->access.fifo_full(info_p);
32353+ temp = info_p->access->fifo_full(info_p);
32354 if (temp != 0) {
32355 break;
32356 }
32357@@ -1451,7 +1453,7 @@ DBG(
32358 /*
32359 * Send the cmd
32360 */
32361- info_p->access.submit_command(info_p, c);
32362+ info_p->access->submit_command(info_p, c);
32363 complete = pollcomplete(ctlr);
32364
32365 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32366@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32367 * we check the new geometry. Then turn interrupts back on when
32368 * we're done.
32369 */
32370- host->access.set_intr_mask(host, 0);
32371+ host->access->set_intr_mask(host, 0);
32372 getgeometry(ctlr);
32373- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32374+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32375
32376 for(i=0; i<NWD; i++) {
32377 struct gendisk *disk = ida_gendisk[ctlr][i];
32378@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
32379 /* Wait (up to 2 seconds) for a command to complete */
32380
32381 for (i = 200000; i > 0; i--) {
32382- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32383+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32384 if (done == 0) {
32385 udelay(10); /* a short fixed delay */
32386 } else
32387diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32388index be73e9d..7fbf140 100644
32389--- a/drivers/block/cpqarray.h
32390+++ b/drivers/block/cpqarray.h
32391@@ -99,7 +99,7 @@ struct ctlr_info {
32392 drv_info_t drv[NWD];
32393 struct proc_dir_entry *proc;
32394
32395- struct access_method access;
32396+ struct access_method *access;
32397
32398 cmdlist_t *reqQ;
32399 cmdlist_t *cmpQ;
32400diff --git a/drivers/block/loop.c b/drivers/block/loop.c
32401index 8ec2d70..2804b30 100644
32402--- a/drivers/block/loop.c
32403+++ b/drivers/block/loop.c
32404@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
32405 mm_segment_t old_fs = get_fs();
32406
32407 set_fs(get_ds());
32408- bw = file->f_op->write(file, buf, len, &pos);
32409+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
32410 set_fs(old_fs);
32411 if (likely(bw == len))
32412 return 0;
32413diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
32414index 26ada47..083c480 100644
32415--- a/drivers/block/nbd.c
32416+++ b/drivers/block/nbd.c
32417@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
32418 struct kvec iov;
32419 sigset_t blocked, oldset;
32420
32421+ pax_track_stack();
32422+
32423 if (unlikely(!sock)) {
32424 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
32425 lo->disk->disk_name, (send ? "send" : "recv"));
32426@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
32427 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
32428 unsigned int cmd, unsigned long arg)
32429 {
32430+ pax_track_stack();
32431+
32432 switch (cmd) {
32433 case NBD_DISCONNECT: {
32434 struct request sreq;
32435diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
32436index a5d585d..d087be3 100644
32437--- a/drivers/block/pktcdvd.c
32438+++ b/drivers/block/pktcdvd.c
32439@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
32440 return len;
32441 }
32442
32443-static struct sysfs_ops kobj_pkt_ops = {
32444+static const struct sysfs_ops kobj_pkt_ops = {
32445 .show = kobj_pkt_show,
32446 .store = kobj_pkt_store
32447 };
32448diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
32449index 6aad99e..89cd142 100644
32450--- a/drivers/char/Kconfig
32451+++ b/drivers/char/Kconfig
32452@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
32453
32454 config DEVKMEM
32455 bool "/dev/kmem virtual device support"
32456- default y
32457+ default n
32458+ depends on !GRKERNSEC_KMEM
32459 help
32460 Say Y here if you want to support the /dev/kmem device. The
32461 /dev/kmem device is rarely used, but can be used for certain
32462@@ -1114,6 +1115,7 @@ config DEVPORT
32463 bool
32464 depends on !M68K
32465 depends on ISA || PCI
32466+ depends on !GRKERNSEC_KMEM
32467 default y
32468
32469 source "drivers/s390/char/Kconfig"
32470diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
32471index a96f319..a778a5b 100644
32472--- a/drivers/char/agp/frontend.c
32473+++ b/drivers/char/agp/frontend.c
32474@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
32475 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
32476 return -EFAULT;
32477
32478- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
32479+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
32480 return -EFAULT;
32481
32482 client = agp_find_client_by_pid(reserve.pid);
32483diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
32484index d8cff90..9628e70 100644
32485--- a/drivers/char/briq_panel.c
32486+++ b/drivers/char/briq_panel.c
32487@@ -10,6 +10,7 @@
32488 #include <linux/types.h>
32489 #include <linux/errno.h>
32490 #include <linux/tty.h>
32491+#include <linux/mutex.h>
32492 #include <linux/timer.h>
32493 #include <linux/kernel.h>
32494 #include <linux/wait.h>
32495@@ -36,6 +37,7 @@ static int vfd_is_open;
32496 static unsigned char vfd[40];
32497 static int vfd_cursor;
32498 static unsigned char ledpb, led;
32499+static DEFINE_MUTEX(vfd_mutex);
32500
32501 static void update_vfd(void)
32502 {
32503@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32504 if (!vfd_is_open)
32505 return -EBUSY;
32506
32507+ mutex_lock(&vfd_mutex);
32508 for (;;) {
32509 char c;
32510 if (!indx)
32511 break;
32512- if (get_user(c, buf))
32513+ if (get_user(c, buf)) {
32514+ mutex_unlock(&vfd_mutex);
32515 return -EFAULT;
32516+ }
32517 if (esc) {
32518 set_led(c);
32519 esc = 0;
32520@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32521 buf++;
32522 }
32523 update_vfd();
32524+ mutex_unlock(&vfd_mutex);
32525
32526 return len;
32527 }
32528diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
32529index 31e7c91..161afc0 100644
32530--- a/drivers/char/genrtc.c
32531+++ b/drivers/char/genrtc.c
32532@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
32533 switch (cmd) {
32534
32535 case RTC_PLL_GET:
32536+ memset(&pll, 0, sizeof(pll));
32537 if (get_rtc_pll(&pll))
32538 return -EINVAL;
32539 else
32540diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
32541index 006466d..a2bb21c 100644
32542--- a/drivers/char/hpet.c
32543+++ b/drivers/char/hpet.c
32544@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
32545 return 0;
32546 }
32547
32548-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
32549+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
32550
32551 static int
32552 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
32553@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
32554 }
32555
32556 static int
32557-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32558+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
32559 {
32560 struct hpet_timer __iomem *timer;
32561 struct hpet __iomem *hpet;
32562@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32563 {
32564 struct hpet_info info;
32565
32566+ memset(&info, 0, sizeof(info));
32567+
32568 if (devp->hd_ireqfreq)
32569 info.hi_ireqfreq =
32570 hpet_time_div(hpetp, devp->hd_ireqfreq);
32571- else
32572- info.hi_ireqfreq = 0;
32573 info.hi_flags =
32574 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
32575 info.hi_hpet = hpetp->hp_which;
32576diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
32577index 0afc8b8..6913fc3 100644
32578--- a/drivers/char/hvc_beat.c
32579+++ b/drivers/char/hvc_beat.c
32580@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
32581 return cnt;
32582 }
32583
32584-static struct hv_ops hvc_beat_get_put_ops = {
32585+static const struct hv_ops hvc_beat_get_put_ops = {
32586 .get_chars = hvc_beat_get_chars,
32587 .put_chars = hvc_beat_put_chars,
32588 };
32589diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
32590index 98097f2..407dddc 100644
32591--- a/drivers/char/hvc_console.c
32592+++ b/drivers/char/hvc_console.c
32593@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
32594 * console interfaces but can still be used as a tty device. This has to be
32595 * static because kmalloc will not work during early console init.
32596 */
32597-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32598+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32599 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
32600 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
32601
32602@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
32603 * vty adapters do NOT get an hvc_instantiate() callback since they
32604 * appear after early console init.
32605 */
32606-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
32607+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
32608 {
32609 struct hvc_struct *hp;
32610
32611@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
32612 };
32613
32614 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
32615- struct hv_ops *ops, int outbuf_size)
32616+ const struct hv_ops *ops, int outbuf_size)
32617 {
32618 struct hvc_struct *hp;
32619 int i;
32620diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
32621index 10950ca..ed176c3 100644
32622--- a/drivers/char/hvc_console.h
32623+++ b/drivers/char/hvc_console.h
32624@@ -55,7 +55,7 @@ struct hvc_struct {
32625 int outbuf_size;
32626 int n_outbuf;
32627 uint32_t vtermno;
32628- struct hv_ops *ops;
32629+ const struct hv_ops *ops;
32630 int irq_requested;
32631 int data;
32632 struct winsize ws;
32633@@ -76,11 +76,11 @@ struct hv_ops {
32634 };
32635
32636 /* Register a vterm and a slot index for use as a console (console_init) */
32637-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
32638+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
32639
32640 /* register a vterm for hvc tty operation (module_init or hotplug add) */
32641 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
32642- struct hv_ops *ops, int outbuf_size);
32643+ const struct hv_ops *ops, int outbuf_size);
32644 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
32645 extern int hvc_remove(struct hvc_struct *hp);
32646
32647diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
32648index 936d05b..fd02426 100644
32649--- a/drivers/char/hvc_iseries.c
32650+++ b/drivers/char/hvc_iseries.c
32651@@ -197,7 +197,7 @@ done:
32652 return sent;
32653 }
32654
32655-static struct hv_ops hvc_get_put_ops = {
32656+static const struct hv_ops hvc_get_put_ops = {
32657 .get_chars = get_chars,
32658 .put_chars = put_chars,
32659 .notifier_add = notifier_add_irq,
32660diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
32661index b0e168f..69cda2a 100644
32662--- a/drivers/char/hvc_iucv.c
32663+++ b/drivers/char/hvc_iucv.c
32664@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
32665
32666
32667 /* HVC operations */
32668-static struct hv_ops hvc_iucv_ops = {
32669+static const struct hv_ops hvc_iucv_ops = {
32670 .get_chars = hvc_iucv_get_chars,
32671 .put_chars = hvc_iucv_put_chars,
32672 .notifier_add = hvc_iucv_notifier_add,
32673diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
32674index 88590d0..61c4a61 100644
32675--- a/drivers/char/hvc_rtas.c
32676+++ b/drivers/char/hvc_rtas.c
32677@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
32678 return i;
32679 }
32680
32681-static struct hv_ops hvc_rtas_get_put_ops = {
32682+static const struct hv_ops hvc_rtas_get_put_ops = {
32683 .get_chars = hvc_rtas_read_console,
32684 .put_chars = hvc_rtas_write_console,
32685 };
32686diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
32687index bd63ba8..b0957e6 100644
32688--- a/drivers/char/hvc_udbg.c
32689+++ b/drivers/char/hvc_udbg.c
32690@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
32691 return i;
32692 }
32693
32694-static struct hv_ops hvc_udbg_ops = {
32695+static const struct hv_ops hvc_udbg_ops = {
32696 .get_chars = hvc_udbg_get,
32697 .put_chars = hvc_udbg_put,
32698 };
32699diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
32700index 10be343..27370e9 100644
32701--- a/drivers/char/hvc_vio.c
32702+++ b/drivers/char/hvc_vio.c
32703@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
32704 return got;
32705 }
32706
32707-static struct hv_ops hvc_get_put_ops = {
32708+static const struct hv_ops hvc_get_put_ops = {
32709 .get_chars = filtered_get_chars,
32710 .put_chars = hvc_put_chars,
32711 .notifier_add = notifier_add_irq,
32712diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
32713index a6ee32b..94f8c26 100644
32714--- a/drivers/char/hvc_xen.c
32715+++ b/drivers/char/hvc_xen.c
32716@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
32717 return recv;
32718 }
32719
32720-static struct hv_ops hvc_ops = {
32721+static const struct hv_ops hvc_ops = {
32722 .get_chars = read_console,
32723 .put_chars = write_console,
32724 .notifier_add = notifier_add_irq,
32725diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
32726index 266b858..f3ee0bb 100644
32727--- a/drivers/char/hvcs.c
32728+++ b/drivers/char/hvcs.c
32729@@ -82,6 +82,7 @@
32730 #include <asm/hvcserver.h>
32731 #include <asm/uaccess.h>
32732 #include <asm/vio.h>
32733+#include <asm/local.h>
32734
32735 /*
32736 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32737@@ -269,7 +270,7 @@ struct hvcs_struct {
32738 unsigned int index;
32739
32740 struct tty_struct *tty;
32741- int open_count;
32742+ local_t open_count;
32743
32744 /*
32745 * Used to tell the driver kernel_thread what operations need to take
32746@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
32747
32748 spin_lock_irqsave(&hvcsd->lock, flags);
32749
32750- if (hvcsd->open_count > 0) {
32751+ if (local_read(&hvcsd->open_count) > 0) {
32752 spin_unlock_irqrestore(&hvcsd->lock, flags);
32753 printk(KERN_INFO "HVCS: vterm state unchanged. "
32754 "The hvcs device node is still in use.\n");
32755@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
32756 if ((retval = hvcs_partner_connect(hvcsd)))
32757 goto error_release;
32758
32759- hvcsd->open_count = 1;
32760+ local_set(&hvcsd->open_count, 1);
32761 hvcsd->tty = tty;
32762 tty->driver_data = hvcsd;
32763
32764@@ -1169,7 +1170,7 @@ fast_open:
32765
32766 spin_lock_irqsave(&hvcsd->lock, flags);
32767 kref_get(&hvcsd->kref);
32768- hvcsd->open_count++;
32769+ local_inc(&hvcsd->open_count);
32770 hvcsd->todo_mask |= HVCS_SCHED_READ;
32771 spin_unlock_irqrestore(&hvcsd->lock, flags);
32772
32773@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32774 hvcsd = tty->driver_data;
32775
32776 spin_lock_irqsave(&hvcsd->lock, flags);
32777- if (--hvcsd->open_count == 0) {
32778+ if (local_dec_and_test(&hvcsd->open_count)) {
32779
32780 vio_disable_interrupts(hvcsd->vdev);
32781
32782@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32783 free_irq(irq, hvcsd);
32784 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32785 return;
32786- } else if (hvcsd->open_count < 0) {
32787+ } else if (local_read(&hvcsd->open_count) < 0) {
32788 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32789 " is missmanaged.\n",
32790- hvcsd->vdev->unit_address, hvcsd->open_count);
32791+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32792 }
32793
32794 spin_unlock_irqrestore(&hvcsd->lock, flags);
32795@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32796
32797 spin_lock_irqsave(&hvcsd->lock, flags);
32798 /* Preserve this so that we know how many kref refs to put */
32799- temp_open_count = hvcsd->open_count;
32800+ temp_open_count = local_read(&hvcsd->open_count);
32801
32802 /*
32803 * Don't kref put inside the spinlock because the destruction
32804@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32805 hvcsd->tty->driver_data = NULL;
32806 hvcsd->tty = NULL;
32807
32808- hvcsd->open_count = 0;
32809+ local_set(&hvcsd->open_count, 0);
32810
32811 /* This will drop any buffered data on the floor which is OK in a hangup
32812 * scenario. */
32813@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
32814 * the middle of a write operation? This is a crummy place to do this
32815 * but we want to keep it all in the spinlock.
32816 */
32817- if (hvcsd->open_count <= 0) {
32818+ if (local_read(&hvcsd->open_count) <= 0) {
32819 spin_unlock_irqrestore(&hvcsd->lock, flags);
32820 return -ENODEV;
32821 }
32822@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
32823 {
32824 struct hvcs_struct *hvcsd = tty->driver_data;
32825
32826- if (!hvcsd || hvcsd->open_count <= 0)
32827+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32828 return 0;
32829
32830 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32831diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
32832index ec5e3f8..02455ba 100644
32833--- a/drivers/char/ipmi/ipmi_msghandler.c
32834+++ b/drivers/char/ipmi/ipmi_msghandler.c
32835@@ -414,7 +414,7 @@ struct ipmi_smi {
32836 struct proc_dir_entry *proc_dir;
32837 char proc_dir_name[10];
32838
32839- atomic_t stats[IPMI_NUM_STATS];
32840+ atomic_unchecked_t stats[IPMI_NUM_STATS];
32841
32842 /*
32843 * run_to_completion duplicate of smb_info, smi_info
32844@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
32845
32846
32847 #define ipmi_inc_stat(intf, stat) \
32848- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
32849+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
32850 #define ipmi_get_stat(intf, stat) \
32851- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
32852+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
32853
32854 static int is_lan_addr(struct ipmi_addr *addr)
32855 {
32856@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
32857 INIT_LIST_HEAD(&intf->cmd_rcvrs);
32858 init_waitqueue_head(&intf->waitq);
32859 for (i = 0; i < IPMI_NUM_STATS; i++)
32860- atomic_set(&intf->stats[i], 0);
32861+ atomic_set_unchecked(&intf->stats[i], 0);
32862
32863 intf->proc_dir = NULL;
32864
32865@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
32866 struct ipmi_smi_msg smi_msg;
32867 struct ipmi_recv_msg recv_msg;
32868
32869+ pax_track_stack();
32870+
32871 si = (struct ipmi_system_interface_addr *) &addr;
32872 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
32873 si->channel = IPMI_BMC_CHANNEL;
32874diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
32875index abae8c9..8021979 100644
32876--- a/drivers/char/ipmi/ipmi_si_intf.c
32877+++ b/drivers/char/ipmi/ipmi_si_intf.c
32878@@ -277,7 +277,7 @@ struct smi_info {
32879 unsigned char slave_addr;
32880
32881 /* Counters and things for the proc filesystem. */
32882- atomic_t stats[SI_NUM_STATS];
32883+ atomic_unchecked_t stats[SI_NUM_STATS];
32884
32885 struct task_struct *thread;
32886
32887@@ -285,9 +285,9 @@ struct smi_info {
32888 };
32889
32890 #define smi_inc_stat(smi, stat) \
32891- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
32892+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
32893 #define smi_get_stat(smi, stat) \
32894- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
32895+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
32896
32897 #define SI_MAX_PARMS 4
32898
32899@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
32900 atomic_set(&new_smi->req_events, 0);
32901 new_smi->run_to_completion = 0;
32902 for (i = 0; i < SI_NUM_STATS; i++)
32903- atomic_set(&new_smi->stats[i], 0);
32904+ atomic_set_unchecked(&new_smi->stats[i], 0);
32905
32906 new_smi->interrupt_disabled = 0;
32907 atomic_set(&new_smi->stop_operation, 0);
32908diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
32909index 402838f..55e2200 100644
32910--- a/drivers/char/istallion.c
32911+++ b/drivers/char/istallion.c
32912@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
32913 * re-used for each stats call.
32914 */
32915 static comstats_t stli_comstats;
32916-static combrd_t stli_brdstats;
32917 static struct asystats stli_cdkstats;
32918
32919 /*****************************************************************************/
32920@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
32921 {
32922 struct stlibrd *brdp;
32923 unsigned int i;
32924+ combrd_t stli_brdstats;
32925
32926 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
32927 return -EFAULT;
32928@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
32929 struct stliport stli_dummyport;
32930 struct stliport *portp;
32931
32932+ pax_track_stack();
32933+
32934 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32935 return -EFAULT;
32936 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32937@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
32938 struct stlibrd stli_dummybrd;
32939 struct stlibrd *brdp;
32940
32941+ pax_track_stack();
32942+
32943 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32944 return -EFAULT;
32945 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32946diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
32947index 950837c..e55a288 100644
32948--- a/drivers/char/keyboard.c
32949+++ b/drivers/char/keyboard.c
32950@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
32951 kbd->kbdmode == VC_MEDIUMRAW) &&
32952 value != KVAL(K_SAK))
32953 return; /* SAK is allowed even in raw mode */
32954+
32955+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32956+ {
32957+ void *func = fn_handler[value];
32958+ if (func == fn_show_state || func == fn_show_ptregs ||
32959+ func == fn_show_mem)
32960+ return;
32961+ }
32962+#endif
32963+
32964 fn_handler[value](vc);
32965 }
32966
32967@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
32968 .evbit = { BIT_MASK(EV_SND) },
32969 },
32970
32971- { }, /* Terminating entry */
32972+ { 0 }, /* Terminating entry */
32973 };
32974
32975 MODULE_DEVICE_TABLE(input, kbd_ids);
32976diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
32977index 87c67b4..230527a 100644
32978--- a/drivers/char/mbcs.c
32979+++ b/drivers/char/mbcs.c
32980@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
32981 return 0;
32982 }
32983
32984-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
32985+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
32986 {
32987 .part_num = MBCS_PART_NUM,
32988 .mfg_num = MBCS_MFG_NUM,
32989diff --git a/drivers/char/mem.c b/drivers/char/mem.c
32990index 1270f64..8495f49 100644
32991--- a/drivers/char/mem.c
32992+++ b/drivers/char/mem.c
32993@@ -18,6 +18,7 @@
32994 #include <linux/raw.h>
32995 #include <linux/tty.h>
32996 #include <linux/capability.h>
32997+#include <linux/security.h>
32998 #include <linux/ptrace.h>
32999 #include <linux/device.h>
33000 #include <linux/highmem.h>
33001@@ -35,6 +36,10 @@
33002 # include <linux/efi.h>
33003 #endif
33004
33005+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33006+extern struct file_operations grsec_fops;
33007+#endif
33008+
33009 static inline unsigned long size_inside_page(unsigned long start,
33010 unsigned long size)
33011 {
33012@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33013
33014 while (cursor < to) {
33015 if (!devmem_is_allowed(pfn)) {
33016+#ifdef CONFIG_GRKERNSEC_KMEM
33017+ gr_handle_mem_readwrite(from, to);
33018+#else
33019 printk(KERN_INFO
33020 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33021 current->comm, from, to);
33022+#endif
33023 return 0;
33024 }
33025 cursor += PAGE_SIZE;
33026@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33027 }
33028 return 1;
33029 }
33030+#elif defined(CONFIG_GRKERNSEC_KMEM)
33031+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33032+{
33033+ return 0;
33034+}
33035 #else
33036 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33037 {
33038@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
33039 #endif
33040
33041 while (count > 0) {
33042+ char *temp;
33043+
33044 /*
33045 * Handle first page in case it's not aligned
33046 */
33047@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
33048 if (!ptr)
33049 return -EFAULT;
33050
33051- if (copy_to_user(buf, ptr, sz)) {
33052+#ifdef CONFIG_PAX_USERCOPY
33053+ temp = kmalloc(sz, GFP_KERNEL);
33054+ if (!temp) {
33055+ unxlate_dev_mem_ptr(p, ptr);
33056+ return -ENOMEM;
33057+ }
33058+ memcpy(temp, ptr, sz);
33059+#else
33060+ temp = ptr;
33061+#endif
33062+
33063+ if (copy_to_user(buf, temp, sz)) {
33064+
33065+#ifdef CONFIG_PAX_USERCOPY
33066+ kfree(temp);
33067+#endif
33068+
33069 unxlate_dev_mem_ptr(p, ptr);
33070 return -EFAULT;
33071 }
33072
33073+#ifdef CONFIG_PAX_USERCOPY
33074+ kfree(temp);
33075+#endif
33076+
33077 unxlate_dev_mem_ptr(p, ptr);
33078
33079 buf += sz;
33080@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33081 size_t count, loff_t *ppos)
33082 {
33083 unsigned long p = *ppos;
33084- ssize_t low_count, read, sz;
33085+ ssize_t low_count, read, sz, err = 0;
33086 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33087- int err = 0;
33088
33089 read = 0;
33090 if (p < (unsigned long) high_memory) {
33091@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33092 }
33093 #endif
33094 while (low_count > 0) {
33095+ char *temp;
33096+
33097 sz = size_inside_page(p, low_count);
33098
33099 /*
33100@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33101 */
33102 kbuf = xlate_dev_kmem_ptr((char *)p);
33103
33104- if (copy_to_user(buf, kbuf, sz))
33105+#ifdef CONFIG_PAX_USERCOPY
33106+ temp = kmalloc(sz, GFP_KERNEL);
33107+ if (!temp)
33108+ return -ENOMEM;
33109+ memcpy(temp, kbuf, sz);
33110+#else
33111+ temp = kbuf;
33112+#endif
33113+
33114+ err = copy_to_user(buf, temp, sz);
33115+
33116+#ifdef CONFIG_PAX_USERCOPY
33117+ kfree(temp);
33118+#endif
33119+
33120+ if (err)
33121 return -EFAULT;
33122 buf += sz;
33123 p += sz;
33124@@ -889,6 +941,9 @@ static const struct memdev {
33125 #ifdef CONFIG_CRASH_DUMP
33126 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33127 #endif
33128+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33129+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33130+#endif
33131 };
33132
33133 static int memory_open(struct inode *inode, struct file *filp)
33134diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
33135index 918711a..4ffaf5e 100644
33136--- a/drivers/char/mmtimer.c
33137+++ b/drivers/char/mmtimer.c
33138@@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
33139 return err;
33140 }
33141
33142-static struct k_clock sgi_clock = {
33143+static k_clock_no_const sgi_clock = {
33144 .res = 0,
33145 .clock_set = sgi_clock_set,
33146 .clock_get = sgi_clock_get,
33147diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
33148index 674b3ab..a8d1970 100644
33149--- a/drivers/char/pcmcia/ipwireless/tty.c
33150+++ b/drivers/char/pcmcia/ipwireless/tty.c
33151@@ -29,6 +29,7 @@
33152 #include <linux/tty_driver.h>
33153 #include <linux/tty_flip.h>
33154 #include <linux/uaccess.h>
33155+#include <asm/local.h>
33156
33157 #include "tty.h"
33158 #include "network.h"
33159@@ -51,7 +52,7 @@ struct ipw_tty {
33160 int tty_type;
33161 struct ipw_network *network;
33162 struct tty_struct *linux_tty;
33163- int open_count;
33164+ local_t open_count;
33165 unsigned int control_lines;
33166 struct mutex ipw_tty_mutex;
33167 int tx_bytes_queued;
33168@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33169 mutex_unlock(&tty->ipw_tty_mutex);
33170 return -ENODEV;
33171 }
33172- if (tty->open_count == 0)
33173+ if (local_read(&tty->open_count) == 0)
33174 tty->tx_bytes_queued = 0;
33175
33176- tty->open_count++;
33177+ local_inc(&tty->open_count);
33178
33179 tty->linux_tty = linux_tty;
33180 linux_tty->driver_data = tty;
33181@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33182
33183 static void do_ipw_close(struct ipw_tty *tty)
33184 {
33185- tty->open_count--;
33186-
33187- if (tty->open_count == 0) {
33188+ if (local_dec_return(&tty->open_count) == 0) {
33189 struct tty_struct *linux_tty = tty->linux_tty;
33190
33191 if (linux_tty != NULL) {
33192@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
33193 return;
33194
33195 mutex_lock(&tty->ipw_tty_mutex);
33196- if (tty->open_count == 0) {
33197+ if (local_read(&tty->open_count) == 0) {
33198 mutex_unlock(&tty->ipw_tty_mutex);
33199 return;
33200 }
33201@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
33202 return;
33203 }
33204
33205- if (!tty->open_count) {
33206+ if (!local_read(&tty->open_count)) {
33207 mutex_unlock(&tty->ipw_tty_mutex);
33208 return;
33209 }
33210@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
33211 return -ENODEV;
33212
33213 mutex_lock(&tty->ipw_tty_mutex);
33214- if (!tty->open_count) {
33215+ if (!local_read(&tty->open_count)) {
33216 mutex_unlock(&tty->ipw_tty_mutex);
33217 return -EINVAL;
33218 }
33219@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
33220 if (!tty)
33221 return -ENODEV;
33222
33223- if (!tty->open_count)
33224+ if (!local_read(&tty->open_count))
33225 return -EINVAL;
33226
33227 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33228@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
33229 if (!tty)
33230 return 0;
33231
33232- if (!tty->open_count)
33233+ if (!local_read(&tty->open_count))
33234 return 0;
33235
33236 return tty->tx_bytes_queued;
33237@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
33238 if (!tty)
33239 return -ENODEV;
33240
33241- if (!tty->open_count)
33242+ if (!local_read(&tty->open_count))
33243 return -EINVAL;
33244
33245 return get_control_lines(tty);
33246@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
33247 if (!tty)
33248 return -ENODEV;
33249
33250- if (!tty->open_count)
33251+ if (!local_read(&tty->open_count))
33252 return -EINVAL;
33253
33254 return set_control_lines(tty, set, clear);
33255@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
33256 if (!tty)
33257 return -ENODEV;
33258
33259- if (!tty->open_count)
33260+ if (!local_read(&tty->open_count))
33261 return -EINVAL;
33262
33263 /* FIXME: Exactly how is the tty object locked here .. */
33264@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
33265 against a parallel ioctl etc */
33266 mutex_lock(&ttyj->ipw_tty_mutex);
33267 }
33268- while (ttyj->open_count)
33269+ while (local_read(&ttyj->open_count))
33270 do_ipw_close(ttyj);
33271 ipwireless_disassociate_network_ttys(network,
33272 ttyj->channel_idx);
33273diff --git a/drivers/char/pty.c b/drivers/char/pty.c
33274index 62f282e..e45c45c 100644
33275--- a/drivers/char/pty.c
33276+++ b/drivers/char/pty.c
33277@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
33278 register_sysctl_table(pty_root_table);
33279
33280 /* Now create the /dev/ptmx special device */
33281+ pax_open_kernel();
33282 tty_default_fops(&ptmx_fops);
33283- ptmx_fops.open = ptmx_open;
33284+ *(void **)&ptmx_fops.open = ptmx_open;
33285+ pax_close_kernel();
33286
33287 cdev_init(&ptmx_cdev, &ptmx_fops);
33288 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33289diff --git a/drivers/char/random.c b/drivers/char/random.c
33290index 3a19e2d..6ed09d3 100644
33291--- a/drivers/char/random.c
33292+++ b/drivers/char/random.c
33293@@ -254,8 +254,13 @@
33294 /*
33295 * Configuration information
33296 */
33297+#ifdef CONFIG_GRKERNSEC_RANDNET
33298+#define INPUT_POOL_WORDS 512
33299+#define OUTPUT_POOL_WORDS 128
33300+#else
33301 #define INPUT_POOL_WORDS 128
33302 #define OUTPUT_POOL_WORDS 32
33303+#endif
33304 #define SEC_XFER_SIZE 512
33305
33306 /*
33307@@ -292,10 +297,17 @@ static struct poolinfo {
33308 int poolwords;
33309 int tap1, tap2, tap3, tap4, tap5;
33310 } poolinfo_table[] = {
33311+#ifdef CONFIG_GRKERNSEC_RANDNET
33312+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33313+ { 512, 411, 308, 208, 104, 1 },
33314+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33315+ { 128, 103, 76, 51, 25, 1 },
33316+#else
33317 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33318 { 128, 103, 76, 51, 25, 1 },
33319 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33320 { 32, 26, 20, 14, 7, 1 },
33321+#endif
33322 #if 0
33323 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33324 { 2048, 1638, 1231, 819, 411, 1 },
33325@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33326 #include <linux/sysctl.h>
33327
33328 static int min_read_thresh = 8, min_write_thresh;
33329-static int max_read_thresh = INPUT_POOL_WORDS * 32;
33330+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33331 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33332 static char sysctl_bootid[16];
33333
33334diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
33335index 0e29a23..0efc2c2 100644
33336--- a/drivers/char/rocket.c
33337+++ b/drivers/char/rocket.c
33338@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
33339 struct rocket_ports tmp;
33340 int board;
33341
33342+ pax_track_stack();
33343+
33344 if (!retports)
33345 return -EFAULT;
33346 memset(&tmp, 0, sizeof (tmp));
33347diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33348index 8c262aa..4d3b058 100644
33349--- a/drivers/char/sonypi.c
33350+++ b/drivers/char/sonypi.c
33351@@ -55,6 +55,7 @@
33352 #include <asm/uaccess.h>
33353 #include <asm/io.h>
33354 #include <asm/system.h>
33355+#include <asm/local.h>
33356
33357 #include <linux/sonypi.h>
33358
33359@@ -491,7 +492,7 @@ static struct sonypi_device {
33360 spinlock_t fifo_lock;
33361 wait_queue_head_t fifo_proc_list;
33362 struct fasync_struct *fifo_async;
33363- int open_count;
33364+ local_t open_count;
33365 int model;
33366 struct input_dev *input_jog_dev;
33367 struct input_dev *input_key_dev;
33368@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33369 static int sonypi_misc_release(struct inode *inode, struct file *file)
33370 {
33371 mutex_lock(&sonypi_device.lock);
33372- sonypi_device.open_count--;
33373+ local_dec(&sonypi_device.open_count);
33374 mutex_unlock(&sonypi_device.lock);
33375 return 0;
33376 }
33377@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33378 lock_kernel();
33379 mutex_lock(&sonypi_device.lock);
33380 /* Flush input queue on first open */
33381- if (!sonypi_device.open_count)
33382+ if (!local_read(&sonypi_device.open_count))
33383 kfifo_reset(sonypi_device.fifo);
33384- sonypi_device.open_count++;
33385+ local_inc(&sonypi_device.open_count);
33386 mutex_unlock(&sonypi_device.lock);
33387 unlock_kernel();
33388 return 0;
33389diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
33390index db6dcfa..13834cb 100644
33391--- a/drivers/char/stallion.c
33392+++ b/drivers/char/stallion.c
33393@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
33394 struct stlport stl_dummyport;
33395 struct stlport *portp;
33396
33397+ pax_track_stack();
33398+
33399 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
33400 return -EFAULT;
33401 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
33402diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33403index a0789f6..cea3902 100644
33404--- a/drivers/char/tpm/tpm.c
33405+++ b/drivers/char/tpm/tpm.c
33406@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33407 chip->vendor.req_complete_val)
33408 goto out_recv;
33409
33410- if ((status == chip->vendor.req_canceled)) {
33411+ if (status == chip->vendor.req_canceled) {
33412 dev_err(chip->dev, "Operation Canceled\n");
33413 rc = -ECANCELED;
33414 goto out;
33415@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
33416
33417 struct tpm_chip *chip = dev_get_drvdata(dev);
33418
33419+ pax_track_stack();
33420+
33421 tpm_cmd.header.in = tpm_readpubek_header;
33422 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
33423 "attempting to read the PUBEK");
33424diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
33425index bf2170f..ce8cab9 100644
33426--- a/drivers/char/tpm/tpm_bios.c
33427+++ b/drivers/char/tpm/tpm_bios.c
33428@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33429 event = addr;
33430
33431 if ((event->event_type == 0 && event->event_size == 0) ||
33432- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33433+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33434 return NULL;
33435
33436 return addr;
33437@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33438 return NULL;
33439
33440 if ((event->event_type == 0 && event->event_size == 0) ||
33441- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33442+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33443 return NULL;
33444
33445 (*pos)++;
33446@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33447 int i;
33448
33449 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33450- seq_putc(m, data[i]);
33451+ if (!seq_putc(m, data[i]))
33452+ return -EFAULT;
33453
33454 return 0;
33455 }
33456@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
33457 log->bios_event_log_end = log->bios_event_log + len;
33458
33459 virt = acpi_os_map_memory(start, len);
33460+ if (!virt) {
33461+ kfree(log->bios_event_log);
33462+ log->bios_event_log = NULL;
33463+ return -EFAULT;
33464+ }
33465
33466- memcpy(log->bios_event_log, virt, len);
33467+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
33468
33469 acpi_os_unmap_memory(virt, len);
33470 return 0;
33471diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
33472index 123cedf..6664cb4 100644
33473--- a/drivers/char/tty_io.c
33474+++ b/drivers/char/tty_io.c
33475@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
33476 static int tty_release(struct inode *, struct file *);
33477 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
33478 #ifdef CONFIG_COMPAT
33479-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33480+long tty_compat_ioctl(struct file *file, unsigned int cmd,
33481 unsigned long arg);
33482 #else
33483 #define tty_compat_ioctl NULL
33484@@ -1774,6 +1774,7 @@ got_driver:
33485
33486 if (IS_ERR(tty)) {
33487 mutex_unlock(&tty_mutex);
33488+ tty_driver_kref_put(driver);
33489 return PTR_ERR(tty);
33490 }
33491 }
33492@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33493 return retval;
33494 }
33495
33496+EXPORT_SYMBOL(tty_ioctl);
33497+
33498 #ifdef CONFIG_COMPAT
33499-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33500+long tty_compat_ioctl(struct file *file, unsigned int cmd,
33501 unsigned long arg)
33502 {
33503 struct inode *inode = file->f_dentry->d_inode;
33504@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33505
33506 return retval;
33507 }
33508+
33509+EXPORT_SYMBOL(tty_compat_ioctl);
33510 #endif
33511
33512 /*
33513@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33514
33515 void tty_default_fops(struct file_operations *fops)
33516 {
33517- *fops = tty_fops;
33518+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33519 }
33520
33521 /*
33522diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
33523index d814a3d..b55b9c9 100644
33524--- a/drivers/char/tty_ldisc.c
33525+++ b/drivers/char/tty_ldisc.c
33526@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
33527 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33528 struct tty_ldisc_ops *ldo = ld->ops;
33529
33530- ldo->refcount--;
33531+ atomic_dec(&ldo->refcount);
33532 module_put(ldo->owner);
33533 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33534
33535@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
33536 spin_lock_irqsave(&tty_ldisc_lock, flags);
33537 tty_ldiscs[disc] = new_ldisc;
33538 new_ldisc->num = disc;
33539- new_ldisc->refcount = 0;
33540+ atomic_set(&new_ldisc->refcount, 0);
33541 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33542
33543 return ret;
33544@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33545 return -EINVAL;
33546
33547 spin_lock_irqsave(&tty_ldisc_lock, flags);
33548- if (tty_ldiscs[disc]->refcount)
33549+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33550 ret = -EBUSY;
33551 else
33552 tty_ldiscs[disc] = NULL;
33553@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
33554 if (ldops) {
33555 ret = ERR_PTR(-EAGAIN);
33556 if (try_module_get(ldops->owner)) {
33557- ldops->refcount++;
33558+ atomic_inc(&ldops->refcount);
33559 ret = ldops;
33560 }
33561 }
33562@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
33563 unsigned long flags;
33564
33565 spin_lock_irqsave(&tty_ldisc_lock, flags);
33566- ldops->refcount--;
33567+ atomic_dec(&ldops->refcount);
33568 module_put(ldops->owner);
33569 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33570 }
33571diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33572index a035ae3..c27fe2c 100644
33573--- a/drivers/char/virtio_console.c
33574+++ b/drivers/char/virtio_console.c
33575@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
33576 * virtqueue, so we let the drivers do some boutique early-output thing. */
33577 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
33578 {
33579- virtio_cons.put_chars = put_chars;
33580+ pax_open_kernel();
33581+ *(void **)&virtio_cons.put_chars = put_chars;
33582+ pax_close_kernel();
33583 return hvc_instantiate(0, 0, &virtio_cons);
33584 }
33585
33586@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
33587 out_vq = vqs[1];
33588
33589 /* Start using the new console output. */
33590- virtio_cons.get_chars = get_chars;
33591- virtio_cons.put_chars = put_chars;
33592- virtio_cons.notifier_add = notifier_add_vio;
33593- virtio_cons.notifier_del = notifier_del_vio;
33594- virtio_cons.notifier_hangup = notifier_del_vio;
33595+ pax_open_kernel();
33596+ *(void **)&virtio_cons.get_chars = get_chars;
33597+ *(void **)&virtio_cons.put_chars = put_chars;
33598+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
33599+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
33600+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
33601+ pax_close_kernel();
33602
33603 /* The first argument of hvc_alloc() is the virtual console number, so
33604 * we use zero. The second argument is the parameter for the
33605diff --git a/drivers/char/vt.c b/drivers/char/vt.c
33606index 0c80c68..53d59c1 100644
33607--- a/drivers/char/vt.c
33608+++ b/drivers/char/vt.c
33609@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
33610
33611 static void notify_write(struct vc_data *vc, unsigned int unicode)
33612 {
33613- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33614+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33615 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33616 }
33617
33618diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
33619index 6351a26..999af95 100644
33620--- a/drivers/char/vt_ioctl.c
33621+++ b/drivers/char/vt_ioctl.c
33622@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33623 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33624 return -EFAULT;
33625
33626- if (!capable(CAP_SYS_TTY_CONFIG))
33627- perm = 0;
33628-
33629 switch (cmd) {
33630 case KDGKBENT:
33631 key_map = key_maps[s];
33632@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33633 val = (i ? K_HOLE : K_NOSUCHMAP);
33634 return put_user(val, &user_kbe->kb_value);
33635 case KDSKBENT:
33636+ if (!capable(CAP_SYS_TTY_CONFIG))
33637+ perm = 0;
33638+
33639 if (!perm)
33640 return -EPERM;
33641+
33642 if (!i && v == K_NOSUCHMAP) {
33643 /* deallocate map */
33644 key_map = key_maps[s];
33645@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33646 int i, j, k;
33647 int ret;
33648
33649- if (!capable(CAP_SYS_TTY_CONFIG))
33650- perm = 0;
33651-
33652 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33653 if (!kbs) {
33654 ret = -ENOMEM;
33655@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33656 kfree(kbs);
33657 return ((p && *p) ? -EOVERFLOW : 0);
33658 case KDSKBSENT:
33659+ if (!capable(CAP_SYS_TTY_CONFIG))
33660+ perm = 0;
33661+
33662 if (!perm) {
33663 ret = -EPERM;
33664 goto reterr;
33665diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33666index c7ae026..1769c1d 100644
33667--- a/drivers/cpufreq/cpufreq.c
33668+++ b/drivers/cpufreq/cpufreq.c
33669@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
33670 complete(&policy->kobj_unregister);
33671 }
33672
33673-static struct sysfs_ops sysfs_ops = {
33674+static const struct sysfs_ops sysfs_ops = {
33675 .show = show,
33676 .store = store,
33677 };
33678diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
33679index 97b0038..2056670 100644
33680--- a/drivers/cpuidle/sysfs.c
33681+++ b/drivers/cpuidle/sysfs.c
33682@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
33683 return ret;
33684 }
33685
33686-static struct sysfs_ops cpuidle_sysfs_ops = {
33687+static const struct sysfs_ops cpuidle_sysfs_ops = {
33688 .show = cpuidle_show,
33689 .store = cpuidle_store,
33690 };
33691@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
33692 return ret;
33693 }
33694
33695-static struct sysfs_ops cpuidle_state_sysfs_ops = {
33696+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
33697 .show = cpuidle_state_show,
33698 };
33699
33700@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
33701 .release = cpuidle_state_sysfs_release,
33702 };
33703
33704-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33705+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33706 {
33707 kobject_put(&device->kobjs[i]->kobj);
33708 wait_for_completion(&device->kobjs[i]->kobj_unregister);
33709diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
33710index 5f753fc..0377ae9 100644
33711--- a/drivers/crypto/hifn_795x.c
33712+++ b/drivers/crypto/hifn_795x.c
33713@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
33714 0xCA, 0x34, 0x2B, 0x2E};
33715 struct scatterlist sg;
33716
33717+ pax_track_stack();
33718+
33719 memset(src, 0, sizeof(src));
33720 memset(ctx.key, 0, sizeof(ctx.key));
33721
33722diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
33723index 71e6482..de8d96c 100644
33724--- a/drivers/crypto/padlock-aes.c
33725+++ b/drivers/crypto/padlock-aes.c
33726@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
33727 struct crypto_aes_ctx gen_aes;
33728 int cpu;
33729
33730+ pax_track_stack();
33731+
33732 if (key_len % 8) {
33733 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
33734 return -EINVAL;
33735diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
33736index dcc4ab7..cc834bb 100644
33737--- a/drivers/dma/ioat/dma.c
33738+++ b/drivers/dma/ioat/dma.c
33739@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
33740 return entry->show(&chan->common, page);
33741 }
33742
33743-struct sysfs_ops ioat_sysfs_ops = {
33744+const struct sysfs_ops ioat_sysfs_ops = {
33745 .show = ioat_attr_show,
33746 };
33747
33748diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
33749index bbc3e78..f2db62c 100644
33750--- a/drivers/dma/ioat/dma.h
33751+++ b/drivers/dma/ioat/dma.h
33752@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
33753 unsigned long *phys_complete);
33754 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
33755 void ioat_kobject_del(struct ioatdma_device *device);
33756-extern struct sysfs_ops ioat_sysfs_ops;
33757+extern const struct sysfs_ops ioat_sysfs_ops;
33758 extern struct ioat_sysfs_entry ioat_version_attr;
33759 extern struct ioat_sysfs_entry ioat_cap_attr;
33760 #endif /* IOATDMA_H */
33761diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
33762index 9908c9e..3ceb0e5 100644
33763--- a/drivers/dma/ioat/dma_v3.c
33764+++ b/drivers/dma/ioat/dma_v3.c
33765@@ -71,10 +71,10 @@
33766 /* provide a lookup table for setting the source address in the base or
33767 * extended descriptor of an xor or pq descriptor
33768 */
33769-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
33770-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
33771-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
33772-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
33773+static const u8 xor_idx_to_desc = 0xd0;
33774+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
33775+static const u8 pq_idx_to_desc = 0xf8;
33776+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
33777
33778 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
33779 {
33780diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
33781index 85c464a..afd1e73 100644
33782--- a/drivers/edac/amd64_edac.c
33783+++ b/drivers/edac/amd64_edac.c
33784@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
33785 * PCI core identifies what devices are on a system during boot, and then
33786 * inquiry this table to see if this driver is for a given device found.
33787 */
33788-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
33789+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
33790 {
33791 .vendor = PCI_VENDOR_ID_AMD,
33792 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
33793diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
33794index 2b95f1a..4f52793 100644
33795--- a/drivers/edac/amd76x_edac.c
33796+++ b/drivers/edac/amd76x_edac.c
33797@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
33798 edac_mc_free(mci);
33799 }
33800
33801-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
33802+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
33803 {
33804 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33805 AMD762},
33806diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
33807index d205d49..74c9672 100644
33808--- a/drivers/edac/e752x_edac.c
33809+++ b/drivers/edac/e752x_edac.c
33810@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
33811 edac_mc_free(mci);
33812 }
33813
33814-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
33815+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
33816 {
33817 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33818 E7520},
33819diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
33820index c7d11cc..c59c1ca 100644
33821--- a/drivers/edac/e7xxx_edac.c
33822+++ b/drivers/edac/e7xxx_edac.c
33823@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
33824 edac_mc_free(mci);
33825 }
33826
33827-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
33828+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
33829 {
33830 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33831 E7205},
33832diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
33833index 5376457..5fdedbc 100644
33834--- a/drivers/edac/edac_device_sysfs.c
33835+++ b/drivers/edac/edac_device_sysfs.c
33836@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
33837 }
33838
33839 /* edac_dev file operations for an 'ctl_info' */
33840-static struct sysfs_ops device_ctl_info_ops = {
33841+static const struct sysfs_ops device_ctl_info_ops = {
33842 .show = edac_dev_ctl_info_show,
33843 .store = edac_dev_ctl_info_store
33844 };
33845@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
33846 }
33847
33848 /* edac_dev file operations for an 'instance' */
33849-static struct sysfs_ops device_instance_ops = {
33850+static const struct sysfs_ops device_instance_ops = {
33851 .show = edac_dev_instance_show,
33852 .store = edac_dev_instance_store
33853 };
33854@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
33855 }
33856
33857 /* edac_dev file operations for a 'block' */
33858-static struct sysfs_ops device_block_ops = {
33859+static const struct sysfs_ops device_block_ops = {
33860 .show = edac_dev_block_show,
33861 .store = edac_dev_block_store
33862 };
33863diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
33864index e1d4ce0..88840e9 100644
33865--- a/drivers/edac/edac_mc_sysfs.c
33866+++ b/drivers/edac/edac_mc_sysfs.c
33867@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
33868 return -EIO;
33869 }
33870
33871-static struct sysfs_ops csrowfs_ops = {
33872+static const struct sysfs_ops csrowfs_ops = {
33873 .show = csrowdev_show,
33874 .store = csrowdev_store
33875 };
33876@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
33877 }
33878
33879 /* Intermediate show/store table */
33880-static struct sysfs_ops mci_ops = {
33881+static const struct sysfs_ops mci_ops = {
33882 .show = mcidev_show,
33883 .store = mcidev_store
33884 };
33885diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
33886index 422728c..d8d9c88 100644
33887--- a/drivers/edac/edac_pci_sysfs.c
33888+++ b/drivers/edac/edac_pci_sysfs.c
33889@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
33890 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
33891 static int edac_pci_poll_msec = 1000; /* one second workq period */
33892
33893-static atomic_t pci_parity_count = ATOMIC_INIT(0);
33894-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
33895+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
33896+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
33897
33898 static struct kobject *edac_pci_top_main_kobj;
33899 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33900@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
33901 }
33902
33903 /* fs_ops table */
33904-static struct sysfs_ops pci_instance_ops = {
33905+static const struct sysfs_ops pci_instance_ops = {
33906 .show = edac_pci_instance_show,
33907 .store = edac_pci_instance_store
33908 };
33909@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
33910 return -EIO;
33911 }
33912
33913-static struct sysfs_ops edac_pci_sysfs_ops = {
33914+static const struct sysfs_ops edac_pci_sysfs_ops = {
33915 .show = edac_pci_dev_show,
33916 .store = edac_pci_dev_store
33917 };
33918@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33919 edac_printk(KERN_CRIT, EDAC_PCI,
33920 "Signaled System Error on %s\n",
33921 pci_name(dev));
33922- atomic_inc(&pci_nonparity_count);
33923+ atomic_inc_unchecked(&pci_nonparity_count);
33924 }
33925
33926 if (status & (PCI_STATUS_PARITY)) {
33927@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33928 "Master Data Parity Error on %s\n",
33929 pci_name(dev));
33930
33931- atomic_inc(&pci_parity_count);
33932+ atomic_inc_unchecked(&pci_parity_count);
33933 }
33934
33935 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33936@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33937 "Detected Parity Error on %s\n",
33938 pci_name(dev));
33939
33940- atomic_inc(&pci_parity_count);
33941+ atomic_inc_unchecked(&pci_parity_count);
33942 }
33943 }
33944
33945@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33946 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33947 "Signaled System Error on %s\n",
33948 pci_name(dev));
33949- atomic_inc(&pci_nonparity_count);
33950+ atomic_inc_unchecked(&pci_nonparity_count);
33951 }
33952
33953 if (status & (PCI_STATUS_PARITY)) {
33954@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33955 "Master Data Parity Error on "
33956 "%s\n", pci_name(dev));
33957
33958- atomic_inc(&pci_parity_count);
33959+ atomic_inc_unchecked(&pci_parity_count);
33960 }
33961
33962 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33963@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33964 "Detected Parity Error on %s\n",
33965 pci_name(dev));
33966
33967- atomic_inc(&pci_parity_count);
33968+ atomic_inc_unchecked(&pci_parity_count);
33969 }
33970 }
33971 }
33972@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
33973 if (!check_pci_errors)
33974 return;
33975
33976- before_count = atomic_read(&pci_parity_count);
33977+ before_count = atomic_read_unchecked(&pci_parity_count);
33978
33979 /* scan all PCI devices looking for a Parity Error on devices and
33980 * bridges.
33981@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
33982 /* Only if operator has selected panic on PCI Error */
33983 if (edac_pci_get_panic_on_pe()) {
33984 /* If the count is different 'after' from 'before' */
33985- if (before_count != atomic_read(&pci_parity_count))
33986+ if (before_count != atomic_read_unchecked(&pci_parity_count))
33987 panic("EDAC: PCI Parity Error");
33988 }
33989 }
33990diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
33991index 6c9a0f2..9c1cf7e 100644
33992--- a/drivers/edac/i3000_edac.c
33993+++ b/drivers/edac/i3000_edac.c
33994@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
33995 edac_mc_free(mci);
33996 }
33997
33998-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
33999+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
34000 {
34001 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34002 I3000},
34003diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
34004index fde4db9..fe108f9 100644
34005--- a/drivers/edac/i3200_edac.c
34006+++ b/drivers/edac/i3200_edac.c
34007@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
34008 edac_mc_free(mci);
34009 }
34010
34011-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
34012+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
34013 {
34014 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34015 I3200},
34016diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
34017index adc10a2..57d4ccf 100644
34018--- a/drivers/edac/i5000_edac.c
34019+++ b/drivers/edac/i5000_edac.c
34020@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
34021 *
34022 * The "E500P" device is the first device supported.
34023 */
34024-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
34025+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
34026 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
34027 .driver_data = I5000P},
34028
34029diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
34030index 22db05a..b2b5503 100644
34031--- a/drivers/edac/i5100_edac.c
34032+++ b/drivers/edac/i5100_edac.c
34033@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
34034 edac_mc_free(mci);
34035 }
34036
34037-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
34038+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
34039 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
34040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
34041 { 0, }
34042diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
34043index f99d106..f050710 100644
34044--- a/drivers/edac/i5400_edac.c
34045+++ b/drivers/edac/i5400_edac.c
34046@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
34047 *
34048 * The "E500P" device is the first device supported.
34049 */
34050-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
34051+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
34052 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
34053 {0,} /* 0 terminated list. */
34054 };
34055diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
34056index 577760a..9ce16ce 100644
34057--- a/drivers/edac/i82443bxgx_edac.c
34058+++ b/drivers/edac/i82443bxgx_edac.c
34059@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
34060
34061 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
34062
34063-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
34064+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
34065 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
34066 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
34067 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
34068diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
34069index c0088ba..64a7b98 100644
34070--- a/drivers/edac/i82860_edac.c
34071+++ b/drivers/edac/i82860_edac.c
34072@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
34073 edac_mc_free(mci);
34074 }
34075
34076-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
34077+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
34078 {
34079 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34080 I82860},
34081diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
34082index b2d83b9..a34357b 100644
34083--- a/drivers/edac/i82875p_edac.c
34084+++ b/drivers/edac/i82875p_edac.c
34085@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
34086 edac_mc_free(mci);
34087 }
34088
34089-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
34090+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
34091 {
34092 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34093 I82875P},
34094diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
34095index 2eed3ea..87bbbd1 100644
34096--- a/drivers/edac/i82975x_edac.c
34097+++ b/drivers/edac/i82975x_edac.c
34098@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
34099 edac_mc_free(mci);
34100 }
34101
34102-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
34103+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
34104 {
34105 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34106 I82975X
34107diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
34108index 9900675..78ac2b6 100644
34109--- a/drivers/edac/r82600_edac.c
34110+++ b/drivers/edac/r82600_edac.c
34111@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
34112 edac_mc_free(mci);
34113 }
34114
34115-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
34116+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
34117 {
34118 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
34119 },
34120diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
34121index d4ec605..4cfec4e 100644
34122--- a/drivers/edac/x38_edac.c
34123+++ b/drivers/edac/x38_edac.c
34124@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
34125 edac_mc_free(mci);
34126 }
34127
34128-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
34129+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
34130 {
34131 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34132 X38},
34133diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34134index 3fc2ceb..daf098f 100644
34135--- a/drivers/firewire/core-card.c
34136+++ b/drivers/firewire/core-card.c
34137@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
34138
34139 void fw_core_remove_card(struct fw_card *card)
34140 {
34141- struct fw_card_driver dummy_driver = dummy_driver_template;
34142+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34143
34144 card->driver->update_phy_reg(card, 4,
34145 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34146diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34147index 4560d8f..36db24a 100644
34148--- a/drivers/firewire/core-cdev.c
34149+++ b/drivers/firewire/core-cdev.c
34150@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
34151 int ret;
34152
34153 if ((request->channels == 0 && request->bandwidth == 0) ||
34154- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34155- request->bandwidth < 0)
34156+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34157 return -EINVAL;
34158
34159 r = kmalloc(sizeof(*r), GFP_KERNEL);
34160diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34161index da628c7..cf54a2c 100644
34162--- a/drivers/firewire/core-transaction.c
34163+++ b/drivers/firewire/core-transaction.c
34164@@ -36,6 +36,7 @@
34165 #include <linux/string.h>
34166 #include <linux/timer.h>
34167 #include <linux/types.h>
34168+#include <linux/sched.h>
34169
34170 #include <asm/byteorder.h>
34171
34172@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
34173 struct transaction_callback_data d;
34174 struct fw_transaction t;
34175
34176+ pax_track_stack();
34177+
34178 init_completion(&d.done);
34179 d.payload = payload;
34180 fw_send_request(card, &t, tcode, destination_id, generation, speed,
34181diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34182index 7ff6e75..a2965d9 100644
34183--- a/drivers/firewire/core.h
34184+++ b/drivers/firewire/core.h
34185@@ -86,6 +86,7 @@ struct fw_card_driver {
34186
34187 int (*stop_iso)(struct fw_iso_context *ctx);
34188 };
34189+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34190
34191 void fw_card_initialize(struct fw_card *card,
34192 const struct fw_card_driver *driver, struct device *device);
34193diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34194index 3a2ccb0..82fd7c4 100644
34195--- a/drivers/firmware/dmi_scan.c
34196+++ b/drivers/firmware/dmi_scan.c
34197@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
34198 }
34199 }
34200 else {
34201- /*
34202- * no iounmap() for that ioremap(); it would be a no-op, but
34203- * it's so early in setup that sucker gets confused into doing
34204- * what it shouldn't if we actually call it.
34205- */
34206 p = dmi_ioremap(0xF0000, 0x10000);
34207 if (p == NULL)
34208 goto error;
34209@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34210 if (buf == NULL)
34211 return -1;
34212
34213- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34214+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34215
34216 iounmap(buf);
34217 return 0;
34218diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
34219index 9e4f59d..110e24e 100644
34220--- a/drivers/firmware/edd.c
34221+++ b/drivers/firmware/edd.c
34222@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
34223 return ret;
34224 }
34225
34226-static struct sysfs_ops edd_attr_ops = {
34227+static const struct sysfs_ops edd_attr_ops = {
34228 .show = edd_attr_show,
34229 };
34230
34231diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34232index f4f709d..082f06e 100644
34233--- a/drivers/firmware/efivars.c
34234+++ b/drivers/firmware/efivars.c
34235@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
34236 return ret;
34237 }
34238
34239-static struct sysfs_ops efivar_attr_ops = {
34240+static const struct sysfs_ops efivar_attr_ops = {
34241 .show = efivar_attr_show,
34242 .store = efivar_attr_store,
34243 };
34244diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
34245index 051d1eb..0a5d4e7 100644
34246--- a/drivers/firmware/iscsi_ibft.c
34247+++ b/drivers/firmware/iscsi_ibft.c
34248@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
34249 return ret;
34250 }
34251
34252-static struct sysfs_ops ibft_attr_ops = {
34253+static const struct sysfs_ops ibft_attr_ops = {
34254 .show = ibft_show_attribute,
34255 };
34256
34257diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
34258index 56f9234..8c58c7b 100644
34259--- a/drivers/firmware/memmap.c
34260+++ b/drivers/firmware/memmap.c
34261@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
34262 NULL
34263 };
34264
34265-static struct sysfs_ops memmap_attr_ops = {
34266+static const struct sysfs_ops memmap_attr_ops = {
34267 .show = memmap_attr_show,
34268 };
34269
34270diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
34271index b16c9a8..2af7d3f 100644
34272--- a/drivers/gpio/vr41xx_giu.c
34273+++ b/drivers/gpio/vr41xx_giu.c
34274@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34275 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34276 maskl, pendl, maskh, pendh);
34277
34278- atomic_inc(&irq_err_count);
34279+ atomic_inc_unchecked(&irq_err_count);
34280
34281 return -EINVAL;
34282 }
34283diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
34284index bea6efc..3dc0f42 100644
34285--- a/drivers/gpu/drm/drm_crtc.c
34286+++ b/drivers/gpu/drm/drm_crtc.c
34287@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34288 */
34289 if ((out_resp->count_modes >= mode_count) && mode_count) {
34290 copied = 0;
34291- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
34292+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
34293 list_for_each_entry(mode, &connector->modes, head) {
34294 drm_crtc_convert_to_umode(&u_mode, mode);
34295 if (copy_to_user(mode_ptr + copied,
34296@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34297
34298 if ((out_resp->count_props >= props_count) && props_count) {
34299 copied = 0;
34300- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
34301- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
34302+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
34303+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
34304 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
34305 if (connector->property_ids[i] != 0) {
34306 if (put_user(connector->property_ids[i],
34307@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34308
34309 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
34310 copied = 0;
34311- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
34312+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
34313 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
34314 if (connector->encoder_ids[i] != 0) {
34315 if (put_user(connector->encoder_ids[i],
34316@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
34317 }
34318
34319 for (i = 0; i < crtc_req->count_connectors; i++) {
34320- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
34321+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
34322 if (get_user(out_id, &set_connectors_ptr[i])) {
34323 ret = -EFAULT;
34324 goto out;
34325@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34326 out_resp->flags = property->flags;
34327
34328 if ((out_resp->count_values >= value_count) && value_count) {
34329- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
34330+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
34331 for (i = 0; i < value_count; i++) {
34332 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
34333 ret = -EFAULT;
34334@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34335 if (property->flags & DRM_MODE_PROP_ENUM) {
34336 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
34337 copied = 0;
34338- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
34339+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
34340 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
34341
34342 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
34343@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34344 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
34345 copied = 0;
34346 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
34347- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
34348+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
34349
34350 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
34351 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
34352@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
34353 blob = obj_to_blob(obj);
34354
34355 if (out_resp->length == blob->length) {
34356- blob_ptr = (void *)(unsigned long)out_resp->data;
34357+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
34358 if (copy_to_user(blob_ptr, blob->data, blob->length)){
34359 ret = -EFAULT;
34360 goto done;
34361diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34362index 1b8745d..92fdbf6 100644
34363--- a/drivers/gpu/drm/drm_crtc_helper.c
34364+++ b/drivers/gpu/drm/drm_crtc_helper.c
34365@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34366 struct drm_crtc *tmp;
34367 int crtc_mask = 1;
34368
34369- WARN(!crtc, "checking null crtc?");
34370+ BUG_ON(!crtc);
34371
34372 dev = crtc->dev;
34373
34374@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
34375
34376 adjusted_mode = drm_mode_duplicate(dev, mode);
34377
34378+ pax_track_stack();
34379+
34380 crtc->enabled = drm_helper_crtc_in_use(crtc);
34381
34382 if (!crtc->enabled)
34383diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34384index 0e27d98..dec8768 100644
34385--- a/drivers/gpu/drm/drm_drv.c
34386+++ b/drivers/gpu/drm/drm_drv.c
34387@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
34388 char *kdata = NULL;
34389
34390 atomic_inc(&dev->ioctl_count);
34391- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34392+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34393 ++file_priv->ioctl_count;
34394
34395 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34396diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34397index 519161e..98c840c 100644
34398--- a/drivers/gpu/drm/drm_fops.c
34399+++ b/drivers/gpu/drm/drm_fops.c
34400@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
34401 }
34402
34403 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34404- atomic_set(&dev->counts[i], 0);
34405+ atomic_set_unchecked(&dev->counts[i], 0);
34406
34407 dev->sigdata.lock = NULL;
34408
34409@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
34410
34411 retcode = drm_open_helper(inode, filp, dev);
34412 if (!retcode) {
34413- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34414+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34415 spin_lock(&dev->count_lock);
34416- if (!dev->open_count++) {
34417+ if (local_inc_return(&dev->open_count) == 1) {
34418 spin_unlock(&dev->count_lock);
34419 retcode = drm_setup(dev);
34420 goto out;
34421@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
34422
34423 lock_kernel();
34424
34425- DRM_DEBUG("open_count = %d\n", dev->open_count);
34426+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
34427
34428 if (dev->driver->preclose)
34429 dev->driver->preclose(dev, file_priv);
34430@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
34431 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34432 task_pid_nr(current),
34433 (long)old_encode_dev(file_priv->minor->device),
34434- dev->open_count);
34435+ local_read(&dev->open_count));
34436
34437 /* Release any auth tokens that might point to this file_priv,
34438 (do that under the drm_global_mutex) */
34439@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
34440 * End inline drm_release
34441 */
34442
34443- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34444+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34445 spin_lock(&dev->count_lock);
34446- if (!--dev->open_count) {
34447+ if (local_dec_and_test(&dev->open_count)) {
34448 if (atomic_read(&dev->ioctl_count)) {
34449 DRM_ERROR("Device busy: %d\n",
34450 atomic_read(&dev->ioctl_count));
34451diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
34452index 8bf3770..79422805 100644
34453--- a/drivers/gpu/drm/drm_gem.c
34454+++ b/drivers/gpu/drm/drm_gem.c
34455@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
34456 spin_lock_init(&dev->object_name_lock);
34457 idr_init(&dev->object_name_idr);
34458 atomic_set(&dev->object_count, 0);
34459- atomic_set(&dev->object_memory, 0);
34460+ atomic_set_unchecked(&dev->object_memory, 0);
34461 atomic_set(&dev->pin_count, 0);
34462- atomic_set(&dev->pin_memory, 0);
34463+ atomic_set_unchecked(&dev->pin_memory, 0);
34464 atomic_set(&dev->gtt_count, 0);
34465- atomic_set(&dev->gtt_memory, 0);
34466+ atomic_set_unchecked(&dev->gtt_memory, 0);
34467
34468 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
34469 if (!mm) {
34470@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
34471 goto fput;
34472 }
34473 atomic_inc(&dev->object_count);
34474- atomic_add(obj->size, &dev->object_memory);
34475+ atomic_add_unchecked(obj->size, &dev->object_memory);
34476 return obj;
34477 fput:
34478 fput(obj->filp);
34479@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
34480
34481 fput(obj->filp);
34482 atomic_dec(&dev->object_count);
34483- atomic_sub(obj->size, &dev->object_memory);
34484+ atomic_sub_unchecked(obj->size, &dev->object_memory);
34485 kfree(obj);
34486 }
34487 EXPORT_SYMBOL(drm_gem_object_free);
34488diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34489index f0f6c6b..34af322 100644
34490--- a/drivers/gpu/drm/drm_info.c
34491+++ b/drivers/gpu/drm/drm_info.c
34492@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34493 struct drm_local_map *map;
34494 struct drm_map_list *r_list;
34495
34496- /* Hardcoded from _DRM_FRAME_BUFFER,
34497- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34498- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34499- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34500+ static const char * const types[] = {
34501+ [_DRM_FRAME_BUFFER] = "FB",
34502+ [_DRM_REGISTERS] = "REG",
34503+ [_DRM_SHM] = "SHM",
34504+ [_DRM_AGP] = "AGP",
34505+ [_DRM_SCATTER_GATHER] = "SG",
34506+ [_DRM_CONSISTENT] = "PCI",
34507+ [_DRM_GEM] = "GEM" };
34508 const char *type;
34509 int i;
34510
34511@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34512 map = r_list->map;
34513 if (!map)
34514 continue;
34515- if (map->type < 0 || map->type > 5)
34516+ if (map->type >= ARRAY_SIZE(types))
34517 type = "??";
34518 else
34519 type = types[map->type];
34520@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
34521 struct drm_device *dev = node->minor->dev;
34522
34523 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
34524- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
34525+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
34526 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
34527- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
34528- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
34529+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
34530+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
34531 seq_printf(m, "%d gtt total\n", dev->gtt_total);
34532 return 0;
34533 }
34534@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34535 mutex_lock(&dev->struct_mutex);
34536 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
34537 atomic_read(&dev->vma_count),
34538+#ifdef CONFIG_GRKERNSEC_HIDESYM
34539+ NULL, 0);
34540+#else
34541 high_memory, (u64)virt_to_phys(high_memory));
34542+#endif
34543
34544 list_for_each_entry(pt, &dev->vmalist, head) {
34545 vma = pt->vma;
34546@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
34547 continue;
34548 seq_printf(m,
34549 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
34550- pt->pid, vma->vm_start, vma->vm_end,
34551+ pt->pid,
34552+#ifdef CONFIG_GRKERNSEC_HIDESYM
34553+ 0, 0,
34554+#else
34555+ vma->vm_start, vma->vm_end,
34556+#endif
34557 vma->vm_flags & VM_READ ? 'r' : '-',
34558 vma->vm_flags & VM_WRITE ? 'w' : '-',
34559 vma->vm_flags & VM_EXEC ? 'x' : '-',
34560 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34561 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34562 vma->vm_flags & VM_IO ? 'i' : '-',
34563+#ifdef CONFIG_GRKERNSEC_HIDESYM
34564+ 0);
34565+#else
34566 vma->vm_pgoff);
34567+#endif
34568
34569 #if defined(__i386__)
34570 pgprot = pgprot_val(vma->vm_page_prot);
34571diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34572index 282d9fd..71e5f11 100644
34573--- a/drivers/gpu/drm/drm_ioc32.c
34574+++ b/drivers/gpu/drm/drm_ioc32.c
34575@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34576 request = compat_alloc_user_space(nbytes);
34577 if (!access_ok(VERIFY_WRITE, request, nbytes))
34578 return -EFAULT;
34579- list = (struct drm_buf_desc *) (request + 1);
34580+ list = (struct drm_buf_desc __user *) (request + 1);
34581
34582 if (__put_user(count, &request->count)
34583 || __put_user(list, &request->list))
34584@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34585 request = compat_alloc_user_space(nbytes);
34586 if (!access_ok(VERIFY_WRITE, request, nbytes))
34587 return -EFAULT;
34588- list = (struct drm_buf_pub *) (request + 1);
34589+ list = (struct drm_buf_pub __user *) (request + 1);
34590
34591 if (__put_user(count, &request->count)
34592 || __put_user(list, &request->list))
34593diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34594index 9b9ff46..4ea724c 100644
34595--- a/drivers/gpu/drm/drm_ioctl.c
34596+++ b/drivers/gpu/drm/drm_ioctl.c
34597@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34598 stats->data[i].value =
34599 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34600 else
34601- stats->data[i].value = atomic_read(&dev->counts[i]);
34602+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34603 stats->data[i].type = dev->types[i];
34604 }
34605
34606diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34607index e2f70a5..c703e86 100644
34608--- a/drivers/gpu/drm/drm_lock.c
34609+++ b/drivers/gpu/drm/drm_lock.c
34610@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34611 if (drm_lock_take(&master->lock, lock->context)) {
34612 master->lock.file_priv = file_priv;
34613 master->lock.lock_time = jiffies;
34614- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34615+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34616 break; /* Got lock */
34617 }
34618
34619@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34620 return -EINVAL;
34621 }
34622
34623- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34624+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34625
34626 /* kernel_context_switch isn't used by any of the x86 drm
34627 * modules but is required by the Sparc driver.
34628diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34629index 7d1d88c..b9131b2 100644
34630--- a/drivers/gpu/drm/i810/i810_dma.c
34631+++ b/drivers/gpu/drm/i810/i810_dma.c
34632@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34633 dma->buflist[vertex->idx],
34634 vertex->discard, vertex->used);
34635
34636- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34637- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34638+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34639+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34640 sarea_priv->last_enqueue = dev_priv->counter - 1;
34641 sarea_priv->last_dispatch = (int)hw_status[5];
34642
34643@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34644 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34645 mc->last_render);
34646
34647- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34648- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34649+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34650+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34651 sarea_priv->last_enqueue = dev_priv->counter - 1;
34652 sarea_priv->last_dispatch = (int)hw_status[5];
34653
34654diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34655index 21e2691..7321edd 100644
34656--- a/drivers/gpu/drm/i810/i810_drv.h
34657+++ b/drivers/gpu/drm/i810/i810_drv.h
34658@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34659 int page_flipping;
34660
34661 wait_queue_head_t irq_queue;
34662- atomic_t irq_received;
34663- atomic_t irq_emitted;
34664+ atomic_unchecked_t irq_received;
34665+ atomic_unchecked_t irq_emitted;
34666
34667 int front_offset;
34668 } drm_i810_private_t;
34669diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
34670index da82afe..48a45de 100644
34671--- a/drivers/gpu/drm/i830/i830_drv.h
34672+++ b/drivers/gpu/drm/i830/i830_drv.h
34673@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
34674 int page_flipping;
34675
34676 wait_queue_head_t irq_queue;
34677- atomic_t irq_received;
34678- atomic_t irq_emitted;
34679+ atomic_unchecked_t irq_received;
34680+ atomic_unchecked_t irq_emitted;
34681
34682 int use_mi_batchbuffer_start;
34683
34684diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
34685index 91ec2bb..6f21fab 100644
34686--- a/drivers/gpu/drm/i830/i830_irq.c
34687+++ b/drivers/gpu/drm/i830/i830_irq.c
34688@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
34689
34690 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
34691
34692- atomic_inc(&dev_priv->irq_received);
34693+ atomic_inc_unchecked(&dev_priv->irq_received);
34694 wake_up_interruptible(&dev_priv->irq_queue);
34695
34696 return IRQ_HANDLED;
34697@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
34698
34699 DRM_DEBUG("%s\n", __func__);
34700
34701- atomic_inc(&dev_priv->irq_emitted);
34702+ atomic_inc_unchecked(&dev_priv->irq_emitted);
34703
34704 BEGIN_LP_RING(2);
34705 OUT_RING(0);
34706 OUT_RING(GFX_OP_USER_INTERRUPT);
34707 ADVANCE_LP_RING();
34708
34709- return atomic_read(&dev_priv->irq_emitted);
34710+ return atomic_read_unchecked(&dev_priv->irq_emitted);
34711 }
34712
34713 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34714@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34715
34716 DRM_DEBUG("%s\n", __func__);
34717
34718- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34719+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34720 return 0;
34721
34722 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
34723@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34724
34725 for (;;) {
34726 __set_current_state(TASK_INTERRUPTIBLE);
34727- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34728+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34729 break;
34730 if ((signed)(end - jiffies) <= 0) {
34731 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
34732@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
34733 I830_WRITE16(I830REG_HWSTAM, 0xffff);
34734 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
34735 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
34736- atomic_set(&dev_priv->irq_received, 0);
34737- atomic_set(&dev_priv->irq_emitted, 0);
34738+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34739+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
34740 init_waitqueue_head(&dev_priv->irq_queue);
34741 }
34742
34743diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
34744index 288fc50..c6092055 100644
34745--- a/drivers/gpu/drm/i915/dvo.h
34746+++ b/drivers/gpu/drm/i915/dvo.h
34747@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
34748 *
34749 * \return singly-linked list of modes or NULL if no modes found.
34750 */
34751- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
34752+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
34753
34754 /**
34755 * Clean up driver-specific bits of the output
34756 */
34757- void (*destroy) (struct intel_dvo_device *dvo);
34758+ void (* const destroy) (struct intel_dvo_device *dvo);
34759
34760 /**
34761 * Debugging hook to dump device registers to log file
34762 */
34763- void (*dump_regs)(struct intel_dvo_device *dvo);
34764+ void (* const dump_regs)(struct intel_dvo_device *dvo);
34765 };
34766
34767-extern struct intel_dvo_dev_ops sil164_ops;
34768-extern struct intel_dvo_dev_ops ch7xxx_ops;
34769-extern struct intel_dvo_dev_ops ivch_ops;
34770-extern struct intel_dvo_dev_ops tfp410_ops;
34771-extern struct intel_dvo_dev_ops ch7017_ops;
34772+extern const struct intel_dvo_dev_ops sil164_ops;
34773+extern const struct intel_dvo_dev_ops ch7xxx_ops;
34774+extern const struct intel_dvo_dev_ops ivch_ops;
34775+extern const struct intel_dvo_dev_ops tfp410_ops;
34776+extern const struct intel_dvo_dev_ops ch7017_ops;
34777
34778 #endif /* _INTEL_DVO_H */
34779diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
34780index 621815b..499d82e 100644
34781--- a/drivers/gpu/drm/i915/dvo_ch7017.c
34782+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
34783@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
34784 }
34785 }
34786
34787-struct intel_dvo_dev_ops ch7017_ops = {
34788+const struct intel_dvo_dev_ops ch7017_ops = {
34789 .init = ch7017_init,
34790 .detect = ch7017_detect,
34791 .mode_valid = ch7017_mode_valid,
34792diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34793index a9b8962..ac769ba 100644
34794--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
34795+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34796@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
34797 }
34798 }
34799
34800-struct intel_dvo_dev_ops ch7xxx_ops = {
34801+const struct intel_dvo_dev_ops ch7xxx_ops = {
34802 .init = ch7xxx_init,
34803 .detect = ch7xxx_detect,
34804 .mode_valid = ch7xxx_mode_valid,
34805diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
34806index aa176f9..ed2930c 100644
34807--- a/drivers/gpu/drm/i915/dvo_ivch.c
34808+++ b/drivers/gpu/drm/i915/dvo_ivch.c
34809@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
34810 }
34811 }
34812
34813-struct intel_dvo_dev_ops ivch_ops= {
34814+const struct intel_dvo_dev_ops ivch_ops= {
34815 .init = ivch_init,
34816 .dpms = ivch_dpms,
34817 .save = ivch_save,
34818diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
34819index e1c1f73..7dbebcf 100644
34820--- a/drivers/gpu/drm/i915/dvo_sil164.c
34821+++ b/drivers/gpu/drm/i915/dvo_sil164.c
34822@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
34823 }
34824 }
34825
34826-struct intel_dvo_dev_ops sil164_ops = {
34827+const struct intel_dvo_dev_ops sil164_ops = {
34828 .init = sil164_init,
34829 .detect = sil164_detect,
34830 .mode_valid = sil164_mode_valid,
34831diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
34832index 16dce84..7e1b6f8 100644
34833--- a/drivers/gpu/drm/i915/dvo_tfp410.c
34834+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
34835@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
34836 }
34837 }
34838
34839-struct intel_dvo_dev_ops tfp410_ops = {
34840+const struct intel_dvo_dev_ops tfp410_ops = {
34841 .init = tfp410_init,
34842 .detect = tfp410_detect,
34843 .mode_valid = tfp410_mode_valid,
34844diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34845index 7e859d6..7d1cf2b 100644
34846--- a/drivers/gpu/drm/i915/i915_debugfs.c
34847+++ b/drivers/gpu/drm/i915/i915_debugfs.c
34848@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34849 I915_READ(GTIMR));
34850 }
34851 seq_printf(m, "Interrupts received: %d\n",
34852- atomic_read(&dev_priv->irq_received));
34853+ atomic_read_unchecked(&dev_priv->irq_received));
34854 if (dev_priv->hw_status_page != NULL) {
34855 seq_printf(m, "Current sequence: %d\n",
34856 i915_get_gem_seqno(dev));
34857diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
34858index 5449239..7e4f68d 100644
34859--- a/drivers/gpu/drm/i915/i915_drv.c
34860+++ b/drivers/gpu/drm/i915/i915_drv.c
34861@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
34862 return i915_resume(dev);
34863 }
34864
34865-static struct vm_operations_struct i915_gem_vm_ops = {
34866+static const struct vm_operations_struct i915_gem_vm_ops = {
34867 .fault = i915_gem_fault,
34868 .open = drm_gem_vm_open,
34869 .close = drm_gem_vm_close,
34870diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34871index 97163f7..c24c7c7 100644
34872--- a/drivers/gpu/drm/i915/i915_drv.h
34873+++ b/drivers/gpu/drm/i915/i915_drv.h
34874@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
34875 /* display clock increase/decrease */
34876 /* pll clock increase/decrease */
34877 /* clock gating init */
34878-};
34879+} __no_const;
34880
34881 typedef struct drm_i915_private {
34882 struct drm_device *dev;
34883@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
34884 int page_flipping;
34885
34886 wait_queue_head_t irq_queue;
34887- atomic_t irq_received;
34888+ atomic_unchecked_t irq_received;
34889 /** Protects user_irq_refcount and irq_mask_reg */
34890 spinlock_t user_irq_lock;
34891 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
34892diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
34893index 27a3074..eb3f959 100644
34894--- a/drivers/gpu/drm/i915/i915_gem.c
34895+++ b/drivers/gpu/drm/i915/i915_gem.c
34896@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
34897
34898 args->aper_size = dev->gtt_total;
34899 args->aper_available_size = (args->aper_size -
34900- atomic_read(&dev->pin_memory));
34901+ atomic_read_unchecked(&dev->pin_memory));
34902
34903 return 0;
34904 }
34905@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
34906
34907 if (obj_priv->gtt_space) {
34908 atomic_dec(&dev->gtt_count);
34909- atomic_sub(obj->size, &dev->gtt_memory);
34910+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
34911
34912 drm_mm_put_block(obj_priv->gtt_space);
34913 obj_priv->gtt_space = NULL;
34914@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
34915 goto search_free;
34916 }
34917 atomic_inc(&dev->gtt_count);
34918- atomic_add(obj->size, &dev->gtt_memory);
34919+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
34920
34921 /* Assert that the object is not currently in any GPU domain. As it
34922 * wasn't in the GTT, there shouldn't be any way it could have been in
34923@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
34924 "%d/%d gtt bytes\n",
34925 atomic_read(&dev->object_count),
34926 atomic_read(&dev->pin_count),
34927- atomic_read(&dev->object_memory),
34928- atomic_read(&dev->pin_memory),
34929- atomic_read(&dev->gtt_memory),
34930+ atomic_read_unchecked(&dev->object_memory),
34931+ atomic_read_unchecked(&dev->pin_memory),
34932+ atomic_read_unchecked(&dev->gtt_memory),
34933 dev->gtt_total);
34934 }
34935 goto err;
34936@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
34937 */
34938 if (obj_priv->pin_count == 1) {
34939 atomic_inc(&dev->pin_count);
34940- atomic_add(obj->size, &dev->pin_memory);
34941+ atomic_add_unchecked(obj->size, &dev->pin_memory);
34942 if (!obj_priv->active &&
34943 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
34944 !list_empty(&obj_priv->list))
34945@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
34946 list_move_tail(&obj_priv->list,
34947 &dev_priv->mm.inactive_list);
34948 atomic_dec(&dev->pin_count);
34949- atomic_sub(obj->size, &dev->pin_memory);
34950+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
34951 }
34952 i915_verify_inactive(dev, __FILE__, __LINE__);
34953 }
34954diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34955index 63f28ad..f5469da 100644
34956--- a/drivers/gpu/drm/i915/i915_irq.c
34957+++ b/drivers/gpu/drm/i915/i915_irq.c
34958@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
34959 int irq_received;
34960 int ret = IRQ_NONE;
34961
34962- atomic_inc(&dev_priv->irq_received);
34963+ atomic_inc_unchecked(&dev_priv->irq_received);
34964
34965 if (IS_IGDNG(dev))
34966 return igdng_irq_handler(dev);
34967@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
34968 {
34969 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34970
34971- atomic_set(&dev_priv->irq_received, 0);
34972+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34973
34974 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
34975 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
34976diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
34977index 5d9c6a7..d1b0e29 100644
34978--- a/drivers/gpu/drm/i915/intel_sdvo.c
34979+++ b/drivers/gpu/drm/i915/intel_sdvo.c
34980@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
34981 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
34982
34983 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
34984- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34985+ pax_open_kernel();
34986+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34987+ pax_close_kernel();
34988
34989 /* Read the regs to test if we can talk to the device */
34990 for (i = 0; i < 0x40; i++) {
34991diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
34992index be6c6b9..8615d9c 100644
34993--- a/drivers/gpu/drm/mga/mga_drv.h
34994+++ b/drivers/gpu/drm/mga/mga_drv.h
34995@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
34996 u32 clear_cmd;
34997 u32 maccess;
34998
34999- atomic_t vbl_received; /**< Number of vblanks received. */
35000+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35001 wait_queue_head_t fence_queue;
35002- atomic_t last_fence_retired;
35003+ atomic_unchecked_t last_fence_retired;
35004 u32 next_fence_to_post;
35005
35006 unsigned int fb_cpp;
35007diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35008index daa6041..a28a5da 100644
35009--- a/drivers/gpu/drm/mga/mga_irq.c
35010+++ b/drivers/gpu/drm/mga/mga_irq.c
35011@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35012 if (crtc != 0)
35013 return 0;
35014
35015- return atomic_read(&dev_priv->vbl_received);
35016+ return atomic_read_unchecked(&dev_priv->vbl_received);
35017 }
35018
35019
35020@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35021 /* VBLANK interrupt */
35022 if (status & MGA_VLINEPEN) {
35023 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35024- atomic_inc(&dev_priv->vbl_received);
35025+ atomic_inc_unchecked(&dev_priv->vbl_received);
35026 drm_handle_vblank(dev, 0);
35027 handled = 1;
35028 }
35029@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35030 MGA_WRITE(MGA_PRIMEND, prim_end);
35031 }
35032
35033- atomic_inc(&dev_priv->last_fence_retired);
35034+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35035 DRM_WAKEUP(&dev_priv->fence_queue);
35036 handled = 1;
35037 }
35038@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
35039 * using fences.
35040 */
35041 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35042- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35043+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35044 - *sequence) <= (1 << 23)));
35045
35046 *sequence = cur_fence;
35047diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35048index 4c39a40..b22a9ea 100644
35049--- a/drivers/gpu/drm/r128/r128_cce.c
35050+++ b/drivers/gpu/drm/r128/r128_cce.c
35051@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
35052
35053 /* GH: Simple idle check.
35054 */
35055- atomic_set(&dev_priv->idle_count, 0);
35056+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35057
35058 /* We don't support anything other than bus-mastering ring mode,
35059 * but the ring can be in either AGP or PCI space for the ring
35060diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35061index 3c60829..4faf484 100644
35062--- a/drivers/gpu/drm/r128/r128_drv.h
35063+++ b/drivers/gpu/drm/r128/r128_drv.h
35064@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35065 int is_pci;
35066 unsigned long cce_buffers_offset;
35067
35068- atomic_t idle_count;
35069+ atomic_unchecked_t idle_count;
35070
35071 int page_flipping;
35072 int current_page;
35073 u32 crtc_offset;
35074 u32 crtc_offset_cntl;
35075
35076- atomic_t vbl_received;
35077+ atomic_unchecked_t vbl_received;
35078
35079 u32 color_fmt;
35080 unsigned int front_offset;
35081diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35082index 69810fb..97bf17a 100644
35083--- a/drivers/gpu/drm/r128/r128_irq.c
35084+++ b/drivers/gpu/drm/r128/r128_irq.c
35085@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35086 if (crtc != 0)
35087 return 0;
35088
35089- return atomic_read(&dev_priv->vbl_received);
35090+ return atomic_read_unchecked(&dev_priv->vbl_received);
35091 }
35092
35093 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35094@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35095 /* VBLANK interrupt */
35096 if (status & R128_CRTC_VBLANK_INT) {
35097 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35098- atomic_inc(&dev_priv->vbl_received);
35099+ atomic_inc_unchecked(&dev_priv->vbl_received);
35100 drm_handle_vblank(dev, 0);
35101 return IRQ_HANDLED;
35102 }
35103diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35104index af2665c..51922d2 100644
35105--- a/drivers/gpu/drm/r128/r128_state.c
35106+++ b/drivers/gpu/drm/r128/r128_state.c
35107@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
35108
35109 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
35110 {
35111- if (atomic_read(&dev_priv->idle_count) == 0) {
35112+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
35113 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35114 } else {
35115- atomic_set(&dev_priv->idle_count, 0);
35116+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35117 }
35118 }
35119
35120diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
35121index dd72b91..8644b3c 100644
35122--- a/drivers/gpu/drm/radeon/atom.c
35123+++ b/drivers/gpu/drm/radeon/atom.c
35124@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
35125 char name[512];
35126 int i;
35127
35128+ pax_track_stack();
35129+
35130 ctx->card = card;
35131 ctx->bios = bios;
35132
35133diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35134index 0d79577..efaa7a5 100644
35135--- a/drivers/gpu/drm/radeon/mkregtable.c
35136+++ b/drivers/gpu/drm/radeon/mkregtable.c
35137@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35138 regex_t mask_rex;
35139 regmatch_t match[4];
35140 char buf[1024];
35141- size_t end;
35142+ long end;
35143 int len;
35144 int done = 0;
35145 int r;
35146 unsigned o;
35147 struct offset *offset;
35148 char last_reg_s[10];
35149- int last_reg;
35150+ unsigned long last_reg;
35151
35152 if (regcomp
35153 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35154diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
35155index 6735213..38c2c67 100644
35156--- a/drivers/gpu/drm/radeon/radeon.h
35157+++ b/drivers/gpu/drm/radeon/radeon.h
35158@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
35159 */
35160 struct radeon_fence_driver {
35161 uint32_t scratch_reg;
35162- atomic_t seq;
35163+ atomic_unchecked_t seq;
35164 uint32_t last_seq;
35165 unsigned long count_timeout;
35166 wait_queue_head_t queue;
35167@@ -640,7 +640,7 @@ struct radeon_asic {
35168 uint32_t offset, uint32_t obj_size);
35169 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
35170 void (*bandwidth_update)(struct radeon_device *rdev);
35171-};
35172+} __no_const;
35173
35174 /*
35175 * Asic structures
35176diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
35177index 4e928b9..d8b6008 100644
35178--- a/drivers/gpu/drm/radeon/radeon_atombios.c
35179+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
35180@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
35181 bool linkb;
35182 struct radeon_i2c_bus_rec ddc_bus;
35183
35184+ pax_track_stack();
35185+
35186 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35187
35188 if (data_offset == 0)
35189@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
35190 }
35191 }
35192
35193-struct bios_connector {
35194+static struct bios_connector {
35195 bool valid;
35196 uint16_t line_mux;
35197 uint16_t devices;
35198 int connector_type;
35199 struct radeon_i2c_bus_rec ddc_bus;
35200-};
35201+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35202
35203 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35204 drm_device
35205@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35206 uint8_t dac;
35207 union atom_supported_devices *supported_devices;
35208 int i, j;
35209- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35210
35211 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35212
35213diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
35214index 083a181..ccccae0 100644
35215--- a/drivers/gpu/drm/radeon/radeon_display.c
35216+++ b/drivers/gpu/drm/radeon/radeon_display.c
35217@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
35218
35219 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
35220 error = freq - current_freq;
35221- error = error < 0 ? 0xffffffff : error;
35222+ error = (int32_t)error < 0 ? 0xffffffff : error;
35223 } else
35224 error = abs(current_freq - freq);
35225 vco_diff = abs(vco - best_vco);
35226diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35227index 76e4070..193fa7f 100644
35228--- a/drivers/gpu/drm/radeon/radeon_drv.h
35229+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35230@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
35231
35232 /* SW interrupt */
35233 wait_queue_head_t swi_queue;
35234- atomic_t swi_emitted;
35235+ atomic_unchecked_t swi_emitted;
35236 int vblank_crtc;
35237 uint32_t irq_enable_reg;
35238 uint32_t r500_disp_irq_reg;
35239diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
35240index 3beb26d..6ce9c4a 100644
35241--- a/drivers/gpu/drm/radeon/radeon_fence.c
35242+++ b/drivers/gpu/drm/radeon/radeon_fence.c
35243@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
35244 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
35245 return 0;
35246 }
35247- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
35248+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
35249 if (!rdev->cp.ready) {
35250 /* FIXME: cp is not running assume everythings is done right
35251 * away
35252@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
35253 return r;
35254 }
35255 WREG32(rdev->fence_drv.scratch_reg, 0);
35256- atomic_set(&rdev->fence_drv.seq, 0);
35257+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
35258 INIT_LIST_HEAD(&rdev->fence_drv.created);
35259 INIT_LIST_HEAD(&rdev->fence_drv.emited);
35260 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
35261diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35262index a1bf11d..4a123c0 100644
35263--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35264+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35265@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35266 request = compat_alloc_user_space(sizeof(*request));
35267 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35268 || __put_user(req32.param, &request->param)
35269- || __put_user((void __user *)(unsigned long)req32.value,
35270+ || __put_user((unsigned long)req32.value,
35271 &request->value))
35272 return -EFAULT;
35273
35274diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35275index b79ecc4..8dab92d 100644
35276--- a/drivers/gpu/drm/radeon/radeon_irq.c
35277+++ b/drivers/gpu/drm/radeon/radeon_irq.c
35278@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35279 unsigned int ret;
35280 RING_LOCALS;
35281
35282- atomic_inc(&dev_priv->swi_emitted);
35283- ret = atomic_read(&dev_priv->swi_emitted);
35284+ atomic_inc_unchecked(&dev_priv->swi_emitted);
35285+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35286
35287 BEGIN_RING(4);
35288 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35289@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35290 drm_radeon_private_t *dev_priv =
35291 (drm_radeon_private_t *) dev->dev_private;
35292
35293- atomic_set(&dev_priv->swi_emitted, 0);
35294+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35295 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35296
35297 dev->max_vblank_count = 0x001fffff;
35298diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35299index 4747910..48ca4b3 100644
35300--- a/drivers/gpu/drm/radeon/radeon_state.c
35301+++ b/drivers/gpu/drm/radeon/radeon_state.c
35302@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35303 {
35304 drm_radeon_private_t *dev_priv = dev->dev_private;
35305 drm_radeon_getparam_t *param = data;
35306- int value;
35307+ int value = 0;
35308
35309 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35310
35311diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35312index 1381e06..0e53b17 100644
35313--- a/drivers/gpu/drm/radeon/radeon_ttm.c
35314+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35315@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
35316 DRM_INFO("radeon: ttm finalized\n");
35317 }
35318
35319-static struct vm_operations_struct radeon_ttm_vm_ops;
35320-static const struct vm_operations_struct *ttm_vm_ops = NULL;
35321-
35322-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35323-{
35324- struct ttm_buffer_object *bo;
35325- int r;
35326-
35327- bo = (struct ttm_buffer_object *)vma->vm_private_data;
35328- if (bo == NULL) {
35329- return VM_FAULT_NOPAGE;
35330- }
35331- r = ttm_vm_ops->fault(vma, vmf);
35332- return r;
35333-}
35334-
35335 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35336 {
35337 struct drm_file *file_priv;
35338 struct radeon_device *rdev;
35339- int r;
35340
35341 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
35342 return drm_mmap(filp, vma);
35343@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35344
35345 file_priv = (struct drm_file *)filp->private_data;
35346 rdev = file_priv->minor->dev->dev_private;
35347- if (rdev == NULL) {
35348+ if (!rdev)
35349 return -EINVAL;
35350- }
35351- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35352- if (unlikely(r != 0)) {
35353- return r;
35354- }
35355- if (unlikely(ttm_vm_ops == NULL)) {
35356- ttm_vm_ops = vma->vm_ops;
35357- radeon_ttm_vm_ops = *ttm_vm_ops;
35358- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35359- }
35360- vma->vm_ops = &radeon_ttm_vm_ops;
35361- return 0;
35362+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35363 }
35364
35365
35366diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35367index b12ff76..0bd0c6e 100644
35368--- a/drivers/gpu/drm/radeon/rs690.c
35369+++ b/drivers/gpu/drm/radeon/rs690.c
35370@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35371 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35372 rdev->pm.sideport_bandwidth.full)
35373 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35374- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
35375+ read_delay_latency.full = rfixed_const(800 * 1000);
35376 read_delay_latency.full = rfixed_div(read_delay_latency,
35377 rdev->pm.igp_sideport_mclk);
35378+ a.full = rfixed_const(370);
35379+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
35380 } else {
35381 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35382 rdev->pm.k8_bandwidth.full)
35383diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
35384index 0ed436e..e6e7ce3 100644
35385--- a/drivers/gpu/drm/ttm/ttm_bo.c
35386+++ b/drivers/gpu/drm/ttm/ttm_bo.c
35387@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
35388 NULL
35389 };
35390
35391-static struct sysfs_ops ttm_bo_global_ops = {
35392+static const struct sysfs_ops ttm_bo_global_ops = {
35393 .show = &ttm_bo_global_show
35394 };
35395
35396diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35397index 1c040d0..f9e4af8 100644
35398--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
35399+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35400@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35401 {
35402 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
35403 vma->vm_private_data;
35404- struct ttm_bo_device *bdev = bo->bdev;
35405+ struct ttm_bo_device *bdev;
35406 unsigned long bus_base;
35407 unsigned long bus_offset;
35408 unsigned long bus_size;
35409@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35410 unsigned long address = (unsigned long)vmf->virtual_address;
35411 int retval = VM_FAULT_NOPAGE;
35412
35413+ if (!bo)
35414+ return VM_FAULT_NOPAGE;
35415+ bdev = bo->bdev;
35416+
35417 /*
35418 * Work around locking order reversal in fault / nopfn
35419 * between mmap_sem and bo_reserve: Perform a trylock operation
35420diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
35421index b170071..28ae90e 100644
35422--- a/drivers/gpu/drm/ttm/ttm_global.c
35423+++ b/drivers/gpu/drm/ttm/ttm_global.c
35424@@ -36,7 +36,7 @@
35425 struct ttm_global_item {
35426 struct mutex mutex;
35427 void *object;
35428- int refcount;
35429+ atomic_t refcount;
35430 };
35431
35432 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
35433@@ -49,7 +49,7 @@ void ttm_global_init(void)
35434 struct ttm_global_item *item = &glob[i];
35435 mutex_init(&item->mutex);
35436 item->object = NULL;
35437- item->refcount = 0;
35438+ atomic_set(&item->refcount, 0);
35439 }
35440 }
35441
35442@@ -59,7 +59,7 @@ void ttm_global_release(void)
35443 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
35444 struct ttm_global_item *item = &glob[i];
35445 BUG_ON(item->object != NULL);
35446- BUG_ON(item->refcount != 0);
35447+ BUG_ON(atomic_read(&item->refcount) != 0);
35448 }
35449 }
35450
35451@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35452 void *object;
35453
35454 mutex_lock(&item->mutex);
35455- if (item->refcount == 0) {
35456+ if (atomic_read(&item->refcount) == 0) {
35457 item->object = kzalloc(ref->size, GFP_KERNEL);
35458 if (unlikely(item->object == NULL)) {
35459 ret = -ENOMEM;
35460@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35461 goto out_err;
35462
35463 }
35464- ++item->refcount;
35465+ atomic_inc(&item->refcount);
35466 ref->object = item->object;
35467 object = item->object;
35468 mutex_unlock(&item->mutex);
35469@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
35470 struct ttm_global_item *item = &glob[ref->global_type];
35471
35472 mutex_lock(&item->mutex);
35473- BUG_ON(item->refcount == 0);
35474+ BUG_ON(atomic_read(&item->refcount) == 0);
35475 BUG_ON(ref->object != item->object);
35476- if (--item->refcount == 0) {
35477+ if (atomic_dec_and_test(&item->refcount)) {
35478 ref->release(ref);
35479 item->object = NULL;
35480 }
35481diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
35482index 072c281..d8ef483 100644
35483--- a/drivers/gpu/drm/ttm/ttm_memory.c
35484+++ b/drivers/gpu/drm/ttm/ttm_memory.c
35485@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
35486 NULL
35487 };
35488
35489-static struct sysfs_ops ttm_mem_zone_ops = {
35490+static const struct sysfs_ops ttm_mem_zone_ops = {
35491 .show = &ttm_mem_zone_show,
35492 .store = &ttm_mem_zone_store
35493 };
35494diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35495index cafcb84..b8e66cc 100644
35496--- a/drivers/gpu/drm/via/via_drv.h
35497+++ b/drivers/gpu/drm/via/via_drv.h
35498@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35499 typedef uint32_t maskarray_t[5];
35500
35501 typedef struct drm_via_irq {
35502- atomic_t irq_received;
35503+ atomic_unchecked_t irq_received;
35504 uint32_t pending_mask;
35505 uint32_t enable_mask;
35506 wait_queue_head_t irq_queue;
35507@@ -75,7 +75,7 @@ typedef struct drm_via_private {
35508 struct timeval last_vblank;
35509 int last_vblank_valid;
35510 unsigned usec_per_vblank;
35511- atomic_t vbl_received;
35512+ atomic_unchecked_t vbl_received;
35513 drm_via_state_t hc_state;
35514 char pci_buf[VIA_PCI_BUF_SIZE];
35515 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35516diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35517index 5935b88..127a8a6 100644
35518--- a/drivers/gpu/drm/via/via_irq.c
35519+++ b/drivers/gpu/drm/via/via_irq.c
35520@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35521 if (crtc != 0)
35522 return 0;
35523
35524- return atomic_read(&dev_priv->vbl_received);
35525+ return atomic_read_unchecked(&dev_priv->vbl_received);
35526 }
35527
35528 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35529@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35530
35531 status = VIA_READ(VIA_REG_INTERRUPT);
35532 if (status & VIA_IRQ_VBLANK_PENDING) {
35533- atomic_inc(&dev_priv->vbl_received);
35534- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35535+ atomic_inc_unchecked(&dev_priv->vbl_received);
35536+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35537 do_gettimeofday(&cur_vblank);
35538 if (dev_priv->last_vblank_valid) {
35539 dev_priv->usec_per_vblank =
35540@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35541 dev_priv->last_vblank = cur_vblank;
35542 dev_priv->last_vblank_valid = 1;
35543 }
35544- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35545+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35546 DRM_DEBUG("US per vblank is: %u\n",
35547 dev_priv->usec_per_vblank);
35548 }
35549@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35550
35551 for (i = 0; i < dev_priv->num_irqs; ++i) {
35552 if (status & cur_irq->pending_mask) {
35553- atomic_inc(&cur_irq->irq_received);
35554+ atomic_inc_unchecked(&cur_irq->irq_received);
35555 DRM_WAKEUP(&cur_irq->irq_queue);
35556 handled = 1;
35557 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
35558@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
35559 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35560 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35561 masks[irq][4]));
35562- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35563+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35564 } else {
35565 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35566 (((cur_irq_sequence =
35567- atomic_read(&cur_irq->irq_received)) -
35568+ atomic_read_unchecked(&cur_irq->irq_received)) -
35569 *sequence) <= (1 << 23)));
35570 }
35571 *sequence = cur_irq_sequence;
35572@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
35573 }
35574
35575 for (i = 0; i < dev_priv->num_irqs; ++i) {
35576- atomic_set(&cur_irq->irq_received, 0);
35577+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35578 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35579 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35580 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35581@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35582 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35583 case VIA_IRQ_RELATIVE:
35584 irqwait->request.sequence +=
35585- atomic_read(&cur_irq->irq_received);
35586+ atomic_read_unchecked(&cur_irq->irq_received);
35587 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35588 case VIA_IRQ_ABSOLUTE:
35589 break;
35590diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
35591index aa8688d..6a0140c 100644
35592--- a/drivers/gpu/vga/vgaarb.c
35593+++ b/drivers/gpu/vga/vgaarb.c
35594@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
35595 uc = &priv->cards[i];
35596 }
35597
35598- if (!uc)
35599- return -EINVAL;
35600+ if (!uc) {
35601+ ret_val = -EINVAL;
35602+ goto done;
35603+ }
35604
35605- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
35606- return -EINVAL;
35607+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
35608+ ret_val = -EINVAL;
35609+ goto done;
35610+ }
35611
35612- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
35613- return -EINVAL;
35614+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
35615+ ret_val = -EINVAL;
35616+ goto done;
35617+ }
35618
35619 vga_put(pdev, io_state);
35620
35621diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35622index 11f8069..4783396 100644
35623--- a/drivers/hid/hid-core.c
35624+++ b/drivers/hid/hid-core.c
35625@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
35626
35627 int hid_add_device(struct hid_device *hdev)
35628 {
35629- static atomic_t id = ATOMIC_INIT(0);
35630+ static atomic_unchecked_t id = ATOMIC_INIT(0);
35631 int ret;
35632
35633 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35634@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
35635 /* XXX hack, any other cleaner solution after the driver core
35636 * is converted to allow more than 20 bytes as the device name? */
35637 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35638- hdev->vendor, hdev->product, atomic_inc_return(&id));
35639+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35640
35641 ret = device_add(&hdev->dev);
35642 if (!ret)
35643diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
35644index 8b6ee24..70f657d 100644
35645--- a/drivers/hid/usbhid/hiddev.c
35646+++ b/drivers/hid/usbhid/hiddev.c
35647@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
35648 return put_user(HID_VERSION, (int __user *)arg);
35649
35650 case HIDIOCAPPLICATION:
35651- if (arg < 0 || arg >= hid->maxapplication)
35652+ if (arg >= hid->maxapplication)
35653 return -EINVAL;
35654
35655 for (i = 0; i < hid->maxcollection; i++)
35656diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
35657index 5d5ed69..f40533e 100644
35658--- a/drivers/hwmon/lis3lv02d.c
35659+++ b/drivers/hwmon/lis3lv02d.c
35660@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
35661 * the lid is closed. This leads to interrupts as soon as a little move
35662 * is done.
35663 */
35664- atomic_inc(&lis3_dev.count);
35665+ atomic_inc_unchecked(&lis3_dev.count);
35666
35667 wake_up_interruptible(&lis3_dev.misc_wait);
35668 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
35669@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35670 if (test_and_set_bit(0, &lis3_dev.misc_opened))
35671 return -EBUSY; /* already open */
35672
35673- atomic_set(&lis3_dev.count, 0);
35674+ atomic_set_unchecked(&lis3_dev.count, 0);
35675
35676 /*
35677 * The sensor can generate interrupts for free-fall and direction
35678@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35679 add_wait_queue(&lis3_dev.misc_wait, &wait);
35680 while (true) {
35681 set_current_state(TASK_INTERRUPTIBLE);
35682- data = atomic_xchg(&lis3_dev.count, 0);
35683+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
35684 if (data)
35685 break;
35686
35687@@ -244,7 +244,7 @@ out:
35688 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35689 {
35690 poll_wait(file, &lis3_dev.misc_wait, wait);
35691- if (atomic_read(&lis3_dev.count))
35692+ if (atomic_read_unchecked(&lis3_dev.count))
35693 return POLLIN | POLLRDNORM;
35694 return 0;
35695 }
35696diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
35697index 7cdd76f..fe0efdf 100644
35698--- a/drivers/hwmon/lis3lv02d.h
35699+++ b/drivers/hwmon/lis3lv02d.h
35700@@ -201,7 +201,7 @@ struct lis3lv02d {
35701
35702 struct input_polled_dev *idev; /* input device */
35703 struct platform_device *pdev; /* platform device */
35704- atomic_t count; /* interrupt count after last read */
35705+ atomic_unchecked_t count; /* interrupt count after last read */
35706 int xcalib; /* calibrated null value for x */
35707 int ycalib; /* calibrated null value for y */
35708 int zcalib; /* calibrated null value for z */
35709diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35710index 740785e..5a5c6c6 100644
35711--- a/drivers/hwmon/sht15.c
35712+++ b/drivers/hwmon/sht15.c
35713@@ -112,7 +112,7 @@ struct sht15_data {
35714 int supply_uV;
35715 int supply_uV_valid;
35716 struct work_struct update_supply_work;
35717- atomic_t interrupt_handled;
35718+ atomic_unchecked_t interrupt_handled;
35719 };
35720
35721 /**
35722@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
35723 return ret;
35724
35725 gpio_direction_input(data->pdata->gpio_data);
35726- atomic_set(&data->interrupt_handled, 0);
35727+ atomic_set_unchecked(&data->interrupt_handled, 0);
35728
35729 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35730 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35731 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35732 /* Only relevant if the interrupt hasn't occured. */
35733- if (!atomic_read(&data->interrupt_handled))
35734+ if (!atomic_read_unchecked(&data->interrupt_handled))
35735 schedule_work(&data->read_work);
35736 }
35737 ret = wait_event_timeout(data->wait_queue,
35738@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35739 struct sht15_data *data = d;
35740 /* First disable the interrupt */
35741 disable_irq_nosync(irq);
35742- atomic_inc(&data->interrupt_handled);
35743+ atomic_inc_unchecked(&data->interrupt_handled);
35744 /* Then schedule a reading work struct */
35745 if (data->flag != SHT15_READING_NOTHING)
35746 schedule_work(&data->read_work);
35747@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35748 here as could have gone low in meantime so verify
35749 it hasn't!
35750 */
35751- atomic_set(&data->interrupt_handled, 0);
35752+ atomic_set_unchecked(&data->interrupt_handled, 0);
35753 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35754 /* If still not occured or another handler has been scheduled */
35755 if (gpio_get_value(data->pdata->gpio_data)
35756- || atomic_read(&data->interrupt_handled))
35757+ || atomic_read_unchecked(&data->interrupt_handled))
35758 return;
35759 }
35760 /* Read the data back from the device */
35761diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
35762index 97851c5..cb40626 100644
35763--- a/drivers/hwmon/w83791d.c
35764+++ b/drivers/hwmon/w83791d.c
35765@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
35766 struct i2c_board_info *info);
35767 static int w83791d_remove(struct i2c_client *client);
35768
35769-static int w83791d_read(struct i2c_client *client, u8 register);
35770-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
35771+static int w83791d_read(struct i2c_client *client, u8 reg);
35772+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
35773 static struct w83791d_data *w83791d_update_device(struct device *dev);
35774
35775 #ifdef DEBUG
35776diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35777index 378fcb5..5e91fa8 100644
35778--- a/drivers/i2c/busses/i2c-amd756-s4882.c
35779+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35780@@ -43,7 +43,7 @@
35781 extern struct i2c_adapter amd756_smbus;
35782
35783 static struct i2c_adapter *s4882_adapter;
35784-static struct i2c_algorithm *s4882_algo;
35785+static i2c_algorithm_no_const *s4882_algo;
35786
35787 /* Wrapper access functions for multiplexed SMBus */
35788 static DEFINE_MUTEX(amd756_lock);
35789diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35790index 29015eb..af2d8e9 100644
35791--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35792+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
35793@@ -41,7 +41,7 @@
35794 extern struct i2c_adapter *nforce2_smbus;
35795
35796 static struct i2c_adapter *s4985_adapter;
35797-static struct i2c_algorithm *s4985_algo;
35798+static i2c_algorithm_no_const *s4985_algo;
35799
35800 /* Wrapper access functions for multiplexed SMBus */
35801 static DEFINE_MUTEX(nforce2_lock);
35802diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
35803index 878f8ec..12376fc 100644
35804--- a/drivers/ide/aec62xx.c
35805+++ b/drivers/ide/aec62xx.c
35806@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
35807 .cable_detect = atp86x_cable_detect,
35808 };
35809
35810-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
35811+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
35812 { /* 0: AEC6210 */
35813 .name = DRV_NAME,
35814 .init_chipset = init_chipset_aec62xx,
35815diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
35816index e59b6de..4b4fc65 100644
35817--- a/drivers/ide/alim15x3.c
35818+++ b/drivers/ide/alim15x3.c
35819@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
35820 .dma_sff_read_status = ide_dma_sff_read_status,
35821 };
35822
35823-static const struct ide_port_info ali15x3_chipset __devinitdata = {
35824+static const struct ide_port_info ali15x3_chipset __devinitconst = {
35825 .name = DRV_NAME,
35826 .init_chipset = init_chipset_ali15x3,
35827 .init_hwif = init_hwif_ali15x3,
35828diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
35829index 628cd2e..087a414 100644
35830--- a/drivers/ide/amd74xx.c
35831+++ b/drivers/ide/amd74xx.c
35832@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
35833 .udma_mask = udma, \
35834 }
35835
35836-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
35837+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
35838 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
35839 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
35840 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
35841diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
35842index 837322b..837fd71 100644
35843--- a/drivers/ide/atiixp.c
35844+++ b/drivers/ide/atiixp.c
35845@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
35846 .cable_detect = atiixp_cable_detect,
35847 };
35848
35849-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
35850+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
35851 { /* 0: IXP200/300/400/700 */
35852 .name = DRV_NAME,
35853 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
35854diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
35855index ca0c46f..d55318a 100644
35856--- a/drivers/ide/cmd64x.c
35857+++ b/drivers/ide/cmd64x.c
35858@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
35859 .dma_sff_read_status = ide_dma_sff_read_status,
35860 };
35861
35862-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
35863+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
35864 { /* 0: CMD643 */
35865 .name = DRV_NAME,
35866 .init_chipset = init_chipset_cmd64x,
35867diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
35868index 09f98ed..cebc5bc 100644
35869--- a/drivers/ide/cs5520.c
35870+++ b/drivers/ide/cs5520.c
35871@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
35872 .set_dma_mode = cs5520_set_dma_mode,
35873 };
35874
35875-static const struct ide_port_info cyrix_chipset __devinitdata = {
35876+static const struct ide_port_info cyrix_chipset __devinitconst = {
35877 .name = DRV_NAME,
35878 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
35879 .port_ops = &cs5520_port_ops,
35880diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
35881index 40bf05e..7d58ca0 100644
35882--- a/drivers/ide/cs5530.c
35883+++ b/drivers/ide/cs5530.c
35884@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
35885 .udma_filter = cs5530_udma_filter,
35886 };
35887
35888-static const struct ide_port_info cs5530_chipset __devinitdata = {
35889+static const struct ide_port_info cs5530_chipset __devinitconst = {
35890 .name = DRV_NAME,
35891 .init_chipset = init_chipset_cs5530,
35892 .init_hwif = init_hwif_cs5530,
35893diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
35894index 983d957..53e6172 100644
35895--- a/drivers/ide/cs5535.c
35896+++ b/drivers/ide/cs5535.c
35897@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
35898 .cable_detect = cs5535_cable_detect,
35899 };
35900
35901-static const struct ide_port_info cs5535_chipset __devinitdata = {
35902+static const struct ide_port_info cs5535_chipset __devinitconst = {
35903 .name = DRV_NAME,
35904 .port_ops = &cs5535_port_ops,
35905 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
35906diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
35907index 74fc540..8e933d8 100644
35908--- a/drivers/ide/cy82c693.c
35909+++ b/drivers/ide/cy82c693.c
35910@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
35911 .set_dma_mode = cy82c693_set_dma_mode,
35912 };
35913
35914-static const struct ide_port_info cy82c693_chipset __devinitdata = {
35915+static const struct ide_port_info cy82c693_chipset __devinitconst = {
35916 .name = DRV_NAME,
35917 .init_iops = init_iops_cy82c693,
35918 .port_ops = &cy82c693_port_ops,
35919diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
35920index 7ce68ef..e78197d 100644
35921--- a/drivers/ide/hpt366.c
35922+++ b/drivers/ide/hpt366.c
35923@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
35924 }
35925 };
35926
35927-static const struct hpt_info hpt36x __devinitdata = {
35928+static const struct hpt_info hpt36x __devinitconst = {
35929 .chip_name = "HPT36x",
35930 .chip_type = HPT36x,
35931 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
35932@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
35933 .timings = &hpt36x_timings
35934 };
35935
35936-static const struct hpt_info hpt370 __devinitdata = {
35937+static const struct hpt_info hpt370 __devinitconst = {
35938 .chip_name = "HPT370",
35939 .chip_type = HPT370,
35940 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35941@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
35942 .timings = &hpt37x_timings
35943 };
35944
35945-static const struct hpt_info hpt370a __devinitdata = {
35946+static const struct hpt_info hpt370a __devinitconst = {
35947 .chip_name = "HPT370A",
35948 .chip_type = HPT370A,
35949 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35950@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
35951 .timings = &hpt37x_timings
35952 };
35953
35954-static const struct hpt_info hpt374 __devinitdata = {
35955+static const struct hpt_info hpt374 __devinitconst = {
35956 .chip_name = "HPT374",
35957 .chip_type = HPT374,
35958 .udma_mask = ATA_UDMA5,
35959@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
35960 .timings = &hpt37x_timings
35961 };
35962
35963-static const struct hpt_info hpt372 __devinitdata = {
35964+static const struct hpt_info hpt372 __devinitconst = {
35965 .chip_name = "HPT372",
35966 .chip_type = HPT372,
35967 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35968@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
35969 .timings = &hpt37x_timings
35970 };
35971
35972-static const struct hpt_info hpt372a __devinitdata = {
35973+static const struct hpt_info hpt372a __devinitconst = {
35974 .chip_name = "HPT372A",
35975 .chip_type = HPT372A,
35976 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35977@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
35978 .timings = &hpt37x_timings
35979 };
35980
35981-static const struct hpt_info hpt302 __devinitdata = {
35982+static const struct hpt_info hpt302 __devinitconst = {
35983 .chip_name = "HPT302",
35984 .chip_type = HPT302,
35985 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35986@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
35987 .timings = &hpt37x_timings
35988 };
35989
35990-static const struct hpt_info hpt371 __devinitdata = {
35991+static const struct hpt_info hpt371 __devinitconst = {
35992 .chip_name = "HPT371",
35993 .chip_type = HPT371,
35994 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35995@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
35996 .timings = &hpt37x_timings
35997 };
35998
35999-static const struct hpt_info hpt372n __devinitdata = {
36000+static const struct hpt_info hpt372n __devinitconst = {
36001 .chip_name = "HPT372N",
36002 .chip_type = HPT372N,
36003 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36004@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
36005 .timings = &hpt37x_timings
36006 };
36007
36008-static const struct hpt_info hpt302n __devinitdata = {
36009+static const struct hpt_info hpt302n __devinitconst = {
36010 .chip_name = "HPT302N",
36011 .chip_type = HPT302N,
36012 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36013@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
36014 .timings = &hpt37x_timings
36015 };
36016
36017-static const struct hpt_info hpt371n __devinitdata = {
36018+static const struct hpt_info hpt371n __devinitconst = {
36019 .chip_name = "HPT371N",
36020 .chip_type = HPT371N,
36021 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36022@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
36023 .dma_sff_read_status = ide_dma_sff_read_status,
36024 };
36025
36026-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
36027+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
36028 { /* 0: HPT36x */
36029 .name = DRV_NAME,
36030 .init_chipset = init_chipset_hpt366,
36031diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36032index 2de76cc..74186a1 100644
36033--- a/drivers/ide/ide-cd.c
36034+++ b/drivers/ide/ide-cd.c
36035@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36036 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36037 if ((unsigned long)buf & alignment
36038 || blk_rq_bytes(rq) & q->dma_pad_mask
36039- || object_is_on_stack(buf))
36040+ || object_starts_on_stack(buf))
36041 drive->dma = 0;
36042 }
36043 }
36044diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
36045index fefbdfc..62ff465 100644
36046--- a/drivers/ide/ide-floppy.c
36047+++ b/drivers/ide/ide-floppy.c
36048@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
36049 u8 pc_buf[256], header_len, desc_cnt;
36050 int i, rc = 1, blocks, length;
36051
36052+ pax_track_stack();
36053+
36054 ide_debug_log(IDE_DBG_FUNC, "enter");
36055
36056 drive->bios_cyl = 0;
36057diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
36058index 39d4e01..11538ce 100644
36059--- a/drivers/ide/ide-pci-generic.c
36060+++ b/drivers/ide/ide-pci-generic.c
36061@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
36062 .udma_mask = ATA_UDMA6, \
36063 }
36064
36065-static const struct ide_port_info generic_chipsets[] __devinitdata = {
36066+static const struct ide_port_info generic_chipsets[] __devinitconst = {
36067 /* 0: Unknown */
36068 DECLARE_GENERIC_PCI_DEV(0),
36069
36070diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
36071index 0d266a5..aaca790 100644
36072--- a/drivers/ide/it8172.c
36073+++ b/drivers/ide/it8172.c
36074@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
36075 .set_dma_mode = it8172_set_dma_mode,
36076 };
36077
36078-static const struct ide_port_info it8172_port_info __devinitdata = {
36079+static const struct ide_port_info it8172_port_info __devinitconst = {
36080 .name = DRV_NAME,
36081 .port_ops = &it8172_port_ops,
36082 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
36083diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
36084index 4797616..4be488a 100644
36085--- a/drivers/ide/it8213.c
36086+++ b/drivers/ide/it8213.c
36087@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
36088 .cable_detect = it8213_cable_detect,
36089 };
36090
36091-static const struct ide_port_info it8213_chipset __devinitdata = {
36092+static const struct ide_port_info it8213_chipset __devinitconst = {
36093 .name = DRV_NAME,
36094 .enablebits = { {0x41, 0x80, 0x80} },
36095 .port_ops = &it8213_port_ops,
36096diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
36097index 51aa745..146ee60 100644
36098--- a/drivers/ide/it821x.c
36099+++ b/drivers/ide/it821x.c
36100@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
36101 .cable_detect = it821x_cable_detect,
36102 };
36103
36104-static const struct ide_port_info it821x_chipset __devinitdata = {
36105+static const struct ide_port_info it821x_chipset __devinitconst = {
36106 .name = DRV_NAME,
36107 .init_chipset = init_chipset_it821x,
36108 .init_hwif = init_hwif_it821x,
36109diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
36110index bf2be64..9270098 100644
36111--- a/drivers/ide/jmicron.c
36112+++ b/drivers/ide/jmicron.c
36113@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
36114 .cable_detect = jmicron_cable_detect,
36115 };
36116
36117-static const struct ide_port_info jmicron_chipset __devinitdata = {
36118+static const struct ide_port_info jmicron_chipset __devinitconst = {
36119 .name = DRV_NAME,
36120 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
36121 .port_ops = &jmicron_port_ops,
36122diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
36123index 95327a2..73f78d8 100644
36124--- a/drivers/ide/ns87415.c
36125+++ b/drivers/ide/ns87415.c
36126@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
36127 .dma_sff_read_status = superio_dma_sff_read_status,
36128 };
36129
36130-static const struct ide_port_info ns87415_chipset __devinitdata = {
36131+static const struct ide_port_info ns87415_chipset __devinitconst = {
36132 .name = DRV_NAME,
36133 .init_hwif = init_hwif_ns87415,
36134 .tp_ops = &ns87415_tp_ops,
36135diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
36136index f1d70d6..e1de05b 100644
36137--- a/drivers/ide/opti621.c
36138+++ b/drivers/ide/opti621.c
36139@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
36140 .set_pio_mode = opti621_set_pio_mode,
36141 };
36142
36143-static const struct ide_port_info opti621_chipset __devinitdata = {
36144+static const struct ide_port_info opti621_chipset __devinitconst = {
36145 .name = DRV_NAME,
36146 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
36147 .port_ops = &opti621_port_ops,
36148diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
36149index 65ba823..7311f4d 100644
36150--- a/drivers/ide/pdc202xx_new.c
36151+++ b/drivers/ide/pdc202xx_new.c
36152@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
36153 .udma_mask = udma, \
36154 }
36155
36156-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
36157+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
36158 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
36159 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
36160 };
36161diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
36162index cb812f3..af816ef 100644
36163--- a/drivers/ide/pdc202xx_old.c
36164+++ b/drivers/ide/pdc202xx_old.c
36165@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
36166 .max_sectors = sectors, \
36167 }
36168
36169-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
36170+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
36171 { /* 0: PDC20246 */
36172 .name = DRV_NAME,
36173 .init_chipset = init_chipset_pdc202xx,
36174diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
36175index bf14f39..15c4b98 100644
36176--- a/drivers/ide/piix.c
36177+++ b/drivers/ide/piix.c
36178@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
36179 .udma_mask = udma, \
36180 }
36181
36182-static const struct ide_port_info piix_pci_info[] __devinitdata = {
36183+static const struct ide_port_info piix_pci_info[] __devinitconst = {
36184 /* 0: MPIIX */
36185 { /*
36186 * MPIIX actually has only a single IDE channel mapped to
36187diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
36188index a6414a8..c04173e 100644
36189--- a/drivers/ide/rz1000.c
36190+++ b/drivers/ide/rz1000.c
36191@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
36192 }
36193 }
36194
36195-static const struct ide_port_info rz1000_chipset __devinitdata = {
36196+static const struct ide_port_info rz1000_chipset __devinitconst = {
36197 .name = DRV_NAME,
36198 .host_flags = IDE_HFLAG_NO_DMA,
36199 };
36200diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
36201index d467478..9203942 100644
36202--- a/drivers/ide/sc1200.c
36203+++ b/drivers/ide/sc1200.c
36204@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
36205 .dma_sff_read_status = ide_dma_sff_read_status,
36206 };
36207
36208-static const struct ide_port_info sc1200_chipset __devinitdata = {
36209+static const struct ide_port_info sc1200_chipset __devinitconst = {
36210 .name = DRV_NAME,
36211 .port_ops = &sc1200_port_ops,
36212 .dma_ops = &sc1200_dma_ops,
36213diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
36214index 1104bb3..59c5194 100644
36215--- a/drivers/ide/scc_pata.c
36216+++ b/drivers/ide/scc_pata.c
36217@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
36218 .dma_sff_read_status = scc_dma_sff_read_status,
36219 };
36220
36221-static const struct ide_port_info scc_chipset __devinitdata = {
36222+static const struct ide_port_info scc_chipset __devinitconst = {
36223 .name = "sccIDE",
36224 .init_iops = init_iops_scc,
36225 .init_dma = scc_init_dma,
36226diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
36227index b6554ef..6cc2cc3 100644
36228--- a/drivers/ide/serverworks.c
36229+++ b/drivers/ide/serverworks.c
36230@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
36231 .cable_detect = svwks_cable_detect,
36232 };
36233
36234-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
36235+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
36236 { /* 0: OSB4 */
36237 .name = DRV_NAME,
36238 .init_chipset = init_chipset_svwks,
36239diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
36240index ab3db61..afed580 100644
36241--- a/drivers/ide/setup-pci.c
36242+++ b/drivers/ide/setup-pci.c
36243@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
36244 int ret, i, n_ports = dev2 ? 4 : 2;
36245 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
36246
36247+ pax_track_stack();
36248+
36249 for (i = 0; i < n_ports / 2; i++) {
36250 ret = ide_setup_pci_controller(pdev[i], d, !i);
36251 if (ret < 0)
36252diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
36253index d95df52..0b03a39 100644
36254--- a/drivers/ide/siimage.c
36255+++ b/drivers/ide/siimage.c
36256@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
36257 .udma_mask = ATA_UDMA6, \
36258 }
36259
36260-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
36261+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
36262 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
36263 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
36264 };
36265diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
36266index 3b88eba..ca8699d 100644
36267--- a/drivers/ide/sis5513.c
36268+++ b/drivers/ide/sis5513.c
36269@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
36270 .cable_detect = sis_cable_detect,
36271 };
36272
36273-static const struct ide_port_info sis5513_chipset __devinitdata = {
36274+static const struct ide_port_info sis5513_chipset __devinitconst = {
36275 .name = DRV_NAME,
36276 .init_chipset = init_chipset_sis5513,
36277 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
36278diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
36279index d698da4..fca42a4 100644
36280--- a/drivers/ide/sl82c105.c
36281+++ b/drivers/ide/sl82c105.c
36282@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
36283 .dma_sff_read_status = ide_dma_sff_read_status,
36284 };
36285
36286-static const struct ide_port_info sl82c105_chipset __devinitdata = {
36287+static const struct ide_port_info sl82c105_chipset __devinitconst = {
36288 .name = DRV_NAME,
36289 .init_chipset = init_chipset_sl82c105,
36290 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
36291diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
36292index 1ccfb40..83d5779 100644
36293--- a/drivers/ide/slc90e66.c
36294+++ b/drivers/ide/slc90e66.c
36295@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
36296 .cable_detect = slc90e66_cable_detect,
36297 };
36298
36299-static const struct ide_port_info slc90e66_chipset __devinitdata = {
36300+static const struct ide_port_info slc90e66_chipset __devinitconst = {
36301 .name = DRV_NAME,
36302 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
36303 .port_ops = &slc90e66_port_ops,
36304diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
36305index 05a93d6..5f9e325 100644
36306--- a/drivers/ide/tc86c001.c
36307+++ b/drivers/ide/tc86c001.c
36308@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
36309 .dma_sff_read_status = ide_dma_sff_read_status,
36310 };
36311
36312-static const struct ide_port_info tc86c001_chipset __devinitdata = {
36313+static const struct ide_port_info tc86c001_chipset __devinitconst = {
36314 .name = DRV_NAME,
36315 .init_hwif = init_hwif_tc86c001,
36316 .port_ops = &tc86c001_port_ops,
36317diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
36318index 8773c3b..7907d6c 100644
36319--- a/drivers/ide/triflex.c
36320+++ b/drivers/ide/triflex.c
36321@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
36322 .set_dma_mode = triflex_set_mode,
36323 };
36324
36325-static const struct ide_port_info triflex_device __devinitdata = {
36326+static const struct ide_port_info triflex_device __devinitconst = {
36327 .name = DRV_NAME,
36328 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
36329 .port_ops = &triflex_port_ops,
36330diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
36331index 4b42ca0..e494a98 100644
36332--- a/drivers/ide/trm290.c
36333+++ b/drivers/ide/trm290.c
36334@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
36335 .dma_check = trm290_dma_check,
36336 };
36337
36338-static const struct ide_port_info trm290_chipset __devinitdata = {
36339+static const struct ide_port_info trm290_chipset __devinitconst = {
36340 .name = DRV_NAME,
36341 .init_hwif = init_hwif_trm290,
36342 .tp_ops = &trm290_tp_ops,
36343diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
36344index 028de26..520d5d5 100644
36345--- a/drivers/ide/via82cxxx.c
36346+++ b/drivers/ide/via82cxxx.c
36347@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
36348 .cable_detect = via82cxxx_cable_detect,
36349 };
36350
36351-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
36352+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
36353 .name = DRV_NAME,
36354 .init_chipset = init_chipset_via82cxxx,
36355 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
36356diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
36357index 2cd00b5..14de699 100644
36358--- a/drivers/ieee1394/dv1394.c
36359+++ b/drivers/ieee1394/dv1394.c
36360@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
36361 based upon DIF section and sequence
36362 */
36363
36364-static void inline
36365+static inline void
36366 frame_put_packet (struct frame *f, struct packet *p)
36367 {
36368 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
36369diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
36370index e947d8f..6a966b9 100644
36371--- a/drivers/ieee1394/hosts.c
36372+++ b/drivers/ieee1394/hosts.c
36373@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
36374 }
36375
36376 static struct hpsb_host_driver dummy_driver = {
36377+ .name = "dummy",
36378 .transmit_packet = dummy_transmit_packet,
36379 .devctl = dummy_devctl,
36380 .isoctl = dummy_isoctl
36381diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
36382index ddaab6e..8d37435 100644
36383--- a/drivers/ieee1394/init_ohci1394_dma.c
36384+++ b/drivers/ieee1394/init_ohci1394_dma.c
36385@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
36386 for (func = 0; func < 8; func++) {
36387 u32 class = read_pci_config(num,slot,func,
36388 PCI_CLASS_REVISION);
36389- if ((class == 0xffffffff))
36390+ if (class == 0xffffffff)
36391 continue; /* No device at this func */
36392
36393 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
36394diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
36395index 65c1429..5d8c11f 100644
36396--- a/drivers/ieee1394/ohci1394.c
36397+++ b/drivers/ieee1394/ohci1394.c
36398@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
36399 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
36400
36401 /* Module Parameters */
36402-static int phys_dma = 1;
36403+static int phys_dma;
36404 module_param(phys_dma, int, 0444);
36405-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
36406+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
36407
36408 static void dma_trm_tasklet(unsigned long data);
36409 static void dma_trm_reset(struct dma_trm_ctx *d);
36410diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
36411index f199896..78c9fc8 100644
36412--- a/drivers/ieee1394/sbp2.c
36413+++ b/drivers/ieee1394/sbp2.c
36414@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
36415 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
36416 MODULE_LICENSE("GPL");
36417
36418-static int sbp2_module_init(void)
36419+static int __init sbp2_module_init(void)
36420 {
36421 int ret;
36422
36423diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36424index a5dea6b..0cefe8f 100644
36425--- a/drivers/infiniband/core/cm.c
36426+++ b/drivers/infiniband/core/cm.c
36427@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36428
36429 struct cm_counter_group {
36430 struct kobject obj;
36431- atomic_long_t counter[CM_ATTR_COUNT];
36432+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36433 };
36434
36435 struct cm_counter_attribute {
36436@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36437 struct ib_mad_send_buf *msg = NULL;
36438 int ret;
36439
36440- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36441+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36442 counter[CM_REQ_COUNTER]);
36443
36444 /* Quick state check to discard duplicate REQs. */
36445@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36446 if (!cm_id_priv)
36447 return;
36448
36449- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36450+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36451 counter[CM_REP_COUNTER]);
36452 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36453 if (ret)
36454@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
36455 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36456 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36457 spin_unlock_irq(&cm_id_priv->lock);
36458- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36459+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36460 counter[CM_RTU_COUNTER]);
36461 goto out;
36462 }
36463@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
36464 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36465 dreq_msg->local_comm_id);
36466 if (!cm_id_priv) {
36467- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36468+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36469 counter[CM_DREQ_COUNTER]);
36470 cm_issue_drep(work->port, work->mad_recv_wc);
36471 return -EINVAL;
36472@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
36473 case IB_CM_MRA_REP_RCVD:
36474 break;
36475 case IB_CM_TIMEWAIT:
36476- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36477+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36478 counter[CM_DREQ_COUNTER]);
36479 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36480 goto unlock;
36481@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
36482 cm_free_msg(msg);
36483 goto deref;
36484 case IB_CM_DREQ_RCVD:
36485- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36486+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36487 counter[CM_DREQ_COUNTER]);
36488 goto unlock;
36489 default:
36490@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
36491 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36492 cm_id_priv->msg, timeout)) {
36493 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36494- atomic_long_inc(&work->port->
36495+ atomic_long_inc_unchecked(&work->port->
36496 counter_group[CM_RECV_DUPLICATES].
36497 counter[CM_MRA_COUNTER]);
36498 goto out;
36499@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
36500 break;
36501 case IB_CM_MRA_REQ_RCVD:
36502 case IB_CM_MRA_REP_RCVD:
36503- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36504+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36505 counter[CM_MRA_COUNTER]);
36506 /* fall through */
36507 default:
36508@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
36509 case IB_CM_LAP_IDLE:
36510 break;
36511 case IB_CM_MRA_LAP_SENT:
36512- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36513+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36514 counter[CM_LAP_COUNTER]);
36515 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36516 goto unlock;
36517@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
36518 cm_free_msg(msg);
36519 goto deref;
36520 case IB_CM_LAP_RCVD:
36521- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36522+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36523 counter[CM_LAP_COUNTER]);
36524 goto unlock;
36525 default:
36526@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36527 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36528 if (cur_cm_id_priv) {
36529 spin_unlock_irq(&cm.lock);
36530- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36531+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36532 counter[CM_SIDR_REQ_COUNTER]);
36533 goto out; /* Duplicate message. */
36534 }
36535@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36536 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36537 msg->retries = 1;
36538
36539- atomic_long_add(1 + msg->retries,
36540+ atomic_long_add_unchecked(1 + msg->retries,
36541 &port->counter_group[CM_XMIT].counter[attr_index]);
36542 if (msg->retries)
36543- atomic_long_add(msg->retries,
36544+ atomic_long_add_unchecked(msg->retries,
36545 &port->counter_group[CM_XMIT_RETRIES].
36546 counter[attr_index]);
36547
36548@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36549 }
36550
36551 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36552- atomic_long_inc(&port->counter_group[CM_RECV].
36553+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36554 counter[attr_id - CM_ATTR_ID_OFFSET]);
36555
36556 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36557@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36558 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36559
36560 return sprintf(buf, "%ld\n",
36561- atomic_long_read(&group->counter[cm_attr->index]));
36562+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36563 }
36564
36565-static struct sysfs_ops cm_counter_ops = {
36566+static const struct sysfs_ops cm_counter_ops = {
36567 .show = cm_show_counter
36568 };
36569
36570diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
36571index 8fd3a6f..61d8075 100644
36572--- a/drivers/infiniband/core/cma.c
36573+++ b/drivers/infiniband/core/cma.c
36574@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
36575
36576 req.private_data_len = sizeof(struct cma_hdr) +
36577 conn_param->private_data_len;
36578+ if (req.private_data_len < conn_param->private_data_len)
36579+ return -EINVAL;
36580+
36581 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36582 if (!req.private_data)
36583 return -ENOMEM;
36584@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
36585 memset(&req, 0, sizeof req);
36586 offset = cma_user_data_offset(id_priv->id.ps);
36587 req.private_data_len = offset + conn_param->private_data_len;
36588+ if (req.private_data_len < conn_param->private_data_len)
36589+ return -EINVAL;
36590+
36591 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36592 if (!private_data)
36593 return -ENOMEM;
36594diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36595index 4507043..14ad522 100644
36596--- a/drivers/infiniband/core/fmr_pool.c
36597+++ b/drivers/infiniband/core/fmr_pool.c
36598@@ -97,8 +97,8 @@ struct ib_fmr_pool {
36599
36600 struct task_struct *thread;
36601
36602- atomic_t req_ser;
36603- atomic_t flush_ser;
36604+ atomic_unchecked_t req_ser;
36605+ atomic_unchecked_t flush_ser;
36606
36607 wait_queue_head_t force_wait;
36608 };
36609@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36610 struct ib_fmr_pool *pool = pool_ptr;
36611
36612 do {
36613- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36614+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36615 ib_fmr_batch_release(pool);
36616
36617- atomic_inc(&pool->flush_ser);
36618+ atomic_inc_unchecked(&pool->flush_ser);
36619 wake_up_interruptible(&pool->force_wait);
36620
36621 if (pool->flush_function)
36622@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36623 }
36624
36625 set_current_state(TASK_INTERRUPTIBLE);
36626- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36627+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36628 !kthread_should_stop())
36629 schedule();
36630 __set_current_state(TASK_RUNNING);
36631@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36632 pool->dirty_watermark = params->dirty_watermark;
36633 pool->dirty_len = 0;
36634 spin_lock_init(&pool->pool_lock);
36635- atomic_set(&pool->req_ser, 0);
36636- atomic_set(&pool->flush_ser, 0);
36637+ atomic_set_unchecked(&pool->req_ser, 0);
36638+ atomic_set_unchecked(&pool->flush_ser, 0);
36639 init_waitqueue_head(&pool->force_wait);
36640
36641 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36642@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36643 }
36644 spin_unlock_irq(&pool->pool_lock);
36645
36646- serial = atomic_inc_return(&pool->req_ser);
36647+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36648 wake_up_process(pool->thread);
36649
36650 if (wait_event_interruptible(pool->force_wait,
36651- atomic_read(&pool->flush_ser) - serial >= 0))
36652+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36653 return -EINTR;
36654
36655 return 0;
36656@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36657 } else {
36658 list_add_tail(&fmr->list, &pool->dirty_list);
36659 if (++pool->dirty_len >= pool->dirty_watermark) {
36660- atomic_inc(&pool->req_ser);
36661+ atomic_inc_unchecked(&pool->req_ser);
36662 wake_up_process(pool->thread);
36663 }
36664 }
36665diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
36666index 158a214..1558bb7 100644
36667--- a/drivers/infiniband/core/sysfs.c
36668+++ b/drivers/infiniband/core/sysfs.c
36669@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
36670 return port_attr->show(p, port_attr, buf);
36671 }
36672
36673-static struct sysfs_ops port_sysfs_ops = {
36674+static const struct sysfs_ops port_sysfs_ops = {
36675 .show = port_attr_show
36676 };
36677
36678diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
36679index 5440da0..1194ecb 100644
36680--- a/drivers/infiniband/core/uverbs_marshall.c
36681+++ b/drivers/infiniband/core/uverbs_marshall.c
36682@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36683 dst->grh.sgid_index = src->grh.sgid_index;
36684 dst->grh.hop_limit = src->grh.hop_limit;
36685 dst->grh.traffic_class = src->grh.traffic_class;
36686+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
36687 dst->dlid = src->dlid;
36688 dst->sl = src->sl;
36689 dst->src_path_bits = src->src_path_bits;
36690 dst->static_rate = src->static_rate;
36691 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
36692 dst->port_num = src->port_num;
36693+ dst->reserved = 0;
36694 }
36695 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
36696
36697 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36698 struct ib_qp_attr *src)
36699 {
36700+ dst->qp_state = src->qp_state;
36701 dst->cur_qp_state = src->cur_qp_state;
36702 dst->path_mtu = src->path_mtu;
36703 dst->path_mig_state = src->path_mig_state;
36704@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36705 dst->rnr_retry = src->rnr_retry;
36706 dst->alt_port_num = src->alt_port_num;
36707 dst->alt_timeout = src->alt_timeout;
36708+ memset(dst->reserved, 0, sizeof(dst->reserved));
36709 }
36710 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
36711
36712diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
36713index 100da85..e0d6609 100644
36714--- a/drivers/infiniband/hw/ipath/ipath_fs.c
36715+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
36716@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
36717 struct infinipath_counters counters;
36718 struct ipath_devdata *dd;
36719
36720+ pax_track_stack();
36721+
36722 dd = file->f_path.dentry->d_inode->i_private;
36723 dd->ipath_f_read_counters(dd, &counters);
36724
36725@@ -122,6 +124,8 @@ static const struct file_operations atomic_counters_ops = {
36726 };
36727
36728 static ssize_t flash_read(struct file *file, char __user *buf,
36729+ size_t count, loff_t *ppos) __size_overflow(3);
36730+static ssize_t flash_read(struct file *file, char __user *buf,
36731 size_t count, loff_t *ppos)
36732 {
36733 struct ipath_devdata *dd;
36734diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36735index cbde0cf..afaf55c 100644
36736--- a/drivers/infiniband/hw/nes/nes.c
36737+++ b/drivers/infiniband/hw/nes/nes.c
36738@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36739 LIST_HEAD(nes_adapter_list);
36740 static LIST_HEAD(nes_dev_list);
36741
36742-atomic_t qps_destroyed;
36743+atomic_unchecked_t qps_destroyed;
36744
36745 static unsigned int ee_flsh_adapter;
36746 static unsigned int sysfs_nonidx_addr;
36747@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36748 struct nes_adapter *nesadapter = nesdev->nesadapter;
36749 u32 qp_id;
36750
36751- atomic_inc(&qps_destroyed);
36752+ atomic_inc_unchecked(&qps_destroyed);
36753
36754 /* Free the control structures */
36755
36756diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36757index bcc6abc..9c76b2f 100644
36758--- a/drivers/infiniband/hw/nes/nes.h
36759+++ b/drivers/infiniband/hw/nes/nes.h
36760@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
36761 extern unsigned int wqm_quanta;
36762 extern struct list_head nes_adapter_list;
36763
36764-extern atomic_t cm_connects;
36765-extern atomic_t cm_accepts;
36766-extern atomic_t cm_disconnects;
36767-extern atomic_t cm_closes;
36768-extern atomic_t cm_connecteds;
36769-extern atomic_t cm_connect_reqs;
36770-extern atomic_t cm_rejects;
36771-extern atomic_t mod_qp_timouts;
36772-extern atomic_t qps_created;
36773-extern atomic_t qps_destroyed;
36774-extern atomic_t sw_qps_destroyed;
36775+extern atomic_unchecked_t cm_connects;
36776+extern atomic_unchecked_t cm_accepts;
36777+extern atomic_unchecked_t cm_disconnects;
36778+extern atomic_unchecked_t cm_closes;
36779+extern atomic_unchecked_t cm_connecteds;
36780+extern atomic_unchecked_t cm_connect_reqs;
36781+extern atomic_unchecked_t cm_rejects;
36782+extern atomic_unchecked_t mod_qp_timouts;
36783+extern atomic_unchecked_t qps_created;
36784+extern atomic_unchecked_t qps_destroyed;
36785+extern atomic_unchecked_t sw_qps_destroyed;
36786 extern u32 mh_detected;
36787 extern u32 mh_pauses_sent;
36788 extern u32 cm_packets_sent;
36789@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
36790 extern u32 cm_listens_created;
36791 extern u32 cm_listens_destroyed;
36792 extern u32 cm_backlog_drops;
36793-extern atomic_t cm_loopbacks;
36794-extern atomic_t cm_nodes_created;
36795-extern atomic_t cm_nodes_destroyed;
36796-extern atomic_t cm_accel_dropped_pkts;
36797-extern atomic_t cm_resets_recvd;
36798+extern atomic_unchecked_t cm_loopbacks;
36799+extern atomic_unchecked_t cm_nodes_created;
36800+extern atomic_unchecked_t cm_nodes_destroyed;
36801+extern atomic_unchecked_t cm_accel_dropped_pkts;
36802+extern atomic_unchecked_t cm_resets_recvd;
36803
36804 extern u32 int_mod_timer_init;
36805 extern u32 int_mod_cq_depth_256;
36806diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36807index 73473db..5ed06e8 100644
36808--- a/drivers/infiniband/hw/nes/nes_cm.c
36809+++ b/drivers/infiniband/hw/nes/nes_cm.c
36810@@ -69,11 +69,11 @@ u32 cm_packets_received;
36811 u32 cm_listens_created;
36812 u32 cm_listens_destroyed;
36813 u32 cm_backlog_drops;
36814-atomic_t cm_loopbacks;
36815-atomic_t cm_nodes_created;
36816-atomic_t cm_nodes_destroyed;
36817-atomic_t cm_accel_dropped_pkts;
36818-atomic_t cm_resets_recvd;
36819+atomic_unchecked_t cm_loopbacks;
36820+atomic_unchecked_t cm_nodes_created;
36821+atomic_unchecked_t cm_nodes_destroyed;
36822+atomic_unchecked_t cm_accel_dropped_pkts;
36823+atomic_unchecked_t cm_resets_recvd;
36824
36825 static inline int mini_cm_accelerated(struct nes_cm_core *,
36826 struct nes_cm_node *);
36827@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
36828
36829 static struct nes_cm_core *g_cm_core;
36830
36831-atomic_t cm_connects;
36832-atomic_t cm_accepts;
36833-atomic_t cm_disconnects;
36834-atomic_t cm_closes;
36835-atomic_t cm_connecteds;
36836-atomic_t cm_connect_reqs;
36837-atomic_t cm_rejects;
36838+atomic_unchecked_t cm_connects;
36839+atomic_unchecked_t cm_accepts;
36840+atomic_unchecked_t cm_disconnects;
36841+atomic_unchecked_t cm_closes;
36842+atomic_unchecked_t cm_connecteds;
36843+atomic_unchecked_t cm_connect_reqs;
36844+atomic_unchecked_t cm_rejects;
36845
36846
36847 /**
36848@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36849 cm_node->rem_mac);
36850
36851 add_hte_node(cm_core, cm_node);
36852- atomic_inc(&cm_nodes_created);
36853+ atomic_inc_unchecked(&cm_nodes_created);
36854
36855 return cm_node;
36856 }
36857@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36858 }
36859
36860 atomic_dec(&cm_core->node_cnt);
36861- atomic_inc(&cm_nodes_destroyed);
36862+ atomic_inc_unchecked(&cm_nodes_destroyed);
36863 nesqp = cm_node->nesqp;
36864 if (nesqp) {
36865 nesqp->cm_node = NULL;
36866@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36867
36868 static void drop_packet(struct sk_buff *skb)
36869 {
36870- atomic_inc(&cm_accel_dropped_pkts);
36871+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36872 dev_kfree_skb_any(skb);
36873 }
36874
36875@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36876
36877 int reset = 0; /* whether to send reset in case of err.. */
36878 int passive_state;
36879- atomic_inc(&cm_resets_recvd);
36880+ atomic_inc_unchecked(&cm_resets_recvd);
36881 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36882 " refcnt=%d\n", cm_node, cm_node->state,
36883 atomic_read(&cm_node->ref_count));
36884@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36885 rem_ref_cm_node(cm_node->cm_core, cm_node);
36886 return NULL;
36887 }
36888- atomic_inc(&cm_loopbacks);
36889+ atomic_inc_unchecked(&cm_loopbacks);
36890 loopbackremotenode->loopbackpartner = cm_node;
36891 loopbackremotenode->tcp_cntxt.rcv_wscale =
36892 NES_CM_DEFAULT_RCV_WND_SCALE;
36893@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36894 add_ref_cm_node(cm_node);
36895 } else if (cm_node->state == NES_CM_STATE_TSA) {
36896 rem_ref_cm_node(cm_core, cm_node);
36897- atomic_inc(&cm_accel_dropped_pkts);
36898+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36899 dev_kfree_skb_any(skb);
36900 break;
36901 }
36902@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36903
36904 if ((cm_id) && (cm_id->event_handler)) {
36905 if (issue_disconn) {
36906- atomic_inc(&cm_disconnects);
36907+ atomic_inc_unchecked(&cm_disconnects);
36908 cm_event.event = IW_CM_EVENT_DISCONNECT;
36909 cm_event.status = disconn_status;
36910 cm_event.local_addr = cm_id->local_addr;
36911@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36912 }
36913
36914 if (issue_close) {
36915- atomic_inc(&cm_closes);
36916+ atomic_inc_unchecked(&cm_closes);
36917 nes_disconnect(nesqp, 1);
36918
36919 cm_id->provider_data = nesqp;
36920@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36921
36922 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36923 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36924- atomic_inc(&cm_accepts);
36925+ atomic_inc_unchecked(&cm_accepts);
36926
36927 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36928 atomic_read(&nesvnic->netdev->refcnt));
36929@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36930
36931 struct nes_cm_core *cm_core;
36932
36933- atomic_inc(&cm_rejects);
36934+ atomic_inc_unchecked(&cm_rejects);
36935 cm_node = (struct nes_cm_node *) cm_id->provider_data;
36936 loopback = cm_node->loopbackpartner;
36937 cm_core = cm_node->cm_core;
36938@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36939 ntohl(cm_id->local_addr.sin_addr.s_addr),
36940 ntohs(cm_id->local_addr.sin_port));
36941
36942- atomic_inc(&cm_connects);
36943+ atomic_inc_unchecked(&cm_connects);
36944 nesqp->active_conn = 1;
36945
36946 /* cache the cm_id in the qp */
36947@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36948 if (nesqp->destroyed) {
36949 return;
36950 }
36951- atomic_inc(&cm_connecteds);
36952+ atomic_inc_unchecked(&cm_connecteds);
36953 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36954 " local port 0x%04X. jiffies = %lu.\n",
36955 nesqp->hwqp.qp_id,
36956@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36957
36958 ret = cm_id->event_handler(cm_id, &cm_event);
36959 cm_id->add_ref(cm_id);
36960- atomic_inc(&cm_closes);
36961+ atomic_inc_unchecked(&cm_closes);
36962 cm_event.event = IW_CM_EVENT_CLOSE;
36963 cm_event.status = IW_CM_EVENT_STATUS_OK;
36964 cm_event.provider_data = cm_id->provider_data;
36965@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36966 return;
36967 cm_id = cm_node->cm_id;
36968
36969- atomic_inc(&cm_connect_reqs);
36970+ atomic_inc_unchecked(&cm_connect_reqs);
36971 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36972 cm_node, cm_id, jiffies);
36973
36974@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36975 return;
36976 cm_id = cm_node->cm_id;
36977
36978- atomic_inc(&cm_connect_reqs);
36979+ atomic_inc_unchecked(&cm_connect_reqs);
36980 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36981 cm_node, cm_id, jiffies);
36982
36983diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36984index e593af3..870694a 100644
36985--- a/drivers/infiniband/hw/nes/nes_nic.c
36986+++ b/drivers/infiniband/hw/nes/nes_nic.c
36987@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36988 target_stat_values[++index] = mh_detected;
36989 target_stat_values[++index] = mh_pauses_sent;
36990 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
36991- target_stat_values[++index] = atomic_read(&cm_connects);
36992- target_stat_values[++index] = atomic_read(&cm_accepts);
36993- target_stat_values[++index] = atomic_read(&cm_disconnects);
36994- target_stat_values[++index] = atomic_read(&cm_connecteds);
36995- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
36996- target_stat_values[++index] = atomic_read(&cm_rejects);
36997- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
36998- target_stat_values[++index] = atomic_read(&qps_created);
36999- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37000- target_stat_values[++index] = atomic_read(&qps_destroyed);
37001- target_stat_values[++index] = atomic_read(&cm_closes);
37002+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37003+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37004+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37005+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37006+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37007+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37008+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37009+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37010+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37011+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37012+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37013 target_stat_values[++index] = cm_packets_sent;
37014 target_stat_values[++index] = cm_packets_bounced;
37015 target_stat_values[++index] = cm_packets_created;
37016@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37017 target_stat_values[++index] = cm_listens_created;
37018 target_stat_values[++index] = cm_listens_destroyed;
37019 target_stat_values[++index] = cm_backlog_drops;
37020- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37021- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37022- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37023- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37024- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37025+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37026+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37027+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37028+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37029+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37030 target_stat_values[++index] = int_mod_timer_init;
37031 target_stat_values[++index] = int_mod_cq_depth_1;
37032 target_stat_values[++index] = int_mod_cq_depth_4;
37033diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37034index a680c42..f914deb 100644
37035--- a/drivers/infiniband/hw/nes/nes_verbs.c
37036+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37037@@ -45,9 +45,9 @@
37038
37039 #include <rdma/ib_umem.h>
37040
37041-atomic_t mod_qp_timouts;
37042-atomic_t qps_created;
37043-atomic_t sw_qps_destroyed;
37044+atomic_unchecked_t mod_qp_timouts;
37045+atomic_unchecked_t qps_created;
37046+atomic_unchecked_t sw_qps_destroyed;
37047
37048 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37049
37050@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37051 if (init_attr->create_flags)
37052 return ERR_PTR(-EINVAL);
37053
37054- atomic_inc(&qps_created);
37055+ atomic_inc_unchecked(&qps_created);
37056 switch (init_attr->qp_type) {
37057 case IB_QPT_RC:
37058 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37059@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37060 struct iw_cm_event cm_event;
37061 int ret;
37062
37063- atomic_inc(&sw_qps_destroyed);
37064+ atomic_inc_unchecked(&sw_qps_destroyed);
37065 nesqp->destroyed = 1;
37066
37067 /* Blow away the connection if it exists. */
37068diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37069index ac11be0..3883c04 100644
37070--- a/drivers/input/gameport/gameport.c
37071+++ b/drivers/input/gameport/gameport.c
37072@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
37073 */
37074 static void gameport_init_port(struct gameport *gameport)
37075 {
37076- static atomic_t gameport_no = ATOMIC_INIT(0);
37077+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37078
37079 __module_get(THIS_MODULE);
37080
37081 mutex_init(&gameport->drv_mutex);
37082 device_initialize(&gameport->dev);
37083- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
37084+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37085 gameport->dev.bus = &gameport_bus;
37086 gameport->dev.release = gameport_release_port;
37087 if (gameport->parent)
37088diff --git a/drivers/input/input.c b/drivers/input/input.c
37089index c82ae82..8cfb9cb 100644
37090--- a/drivers/input/input.c
37091+++ b/drivers/input/input.c
37092@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
37093 */
37094 int input_register_device(struct input_dev *dev)
37095 {
37096- static atomic_t input_no = ATOMIC_INIT(0);
37097+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37098 struct input_handler *handler;
37099 const char *path;
37100 int error;
37101@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
37102 dev->setkeycode = input_default_setkeycode;
37103
37104 dev_set_name(&dev->dev, "input%ld",
37105- (unsigned long) atomic_inc_return(&input_no) - 1);
37106+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37107
37108 error = device_add(&dev->dev);
37109 if (error)
37110diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37111index ca13a6b..b032b0c 100644
37112--- a/drivers/input/joystick/sidewinder.c
37113+++ b/drivers/input/joystick/sidewinder.c
37114@@ -30,6 +30,7 @@
37115 #include <linux/kernel.h>
37116 #include <linux/module.h>
37117 #include <linux/slab.h>
37118+#include <linux/sched.h>
37119 #include <linux/init.h>
37120 #include <linux/input.h>
37121 #include <linux/gameport.h>
37122@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
37123 unsigned char buf[SW_LENGTH];
37124 int i;
37125
37126+ pax_track_stack();
37127+
37128 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
37129
37130 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
37131diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37132index 79e3edc..01412b9 100644
37133--- a/drivers/input/joystick/xpad.c
37134+++ b/drivers/input/joystick/xpad.c
37135@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37136
37137 static int xpad_led_probe(struct usb_xpad *xpad)
37138 {
37139- static atomic_t led_seq = ATOMIC_INIT(0);
37140+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37141 long led_no;
37142 struct xpad_led *led;
37143 struct led_classdev *led_cdev;
37144@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37145 if (!led)
37146 return -ENOMEM;
37147
37148- led_no = (long)atomic_inc_return(&led_seq) - 1;
37149+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37150
37151 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37152 led->xpad = xpad;
37153diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37154index 0236f0d..c7327f1 100644
37155--- a/drivers/input/serio/serio.c
37156+++ b/drivers/input/serio/serio.c
37157@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
37158 */
37159 static void serio_init_port(struct serio *serio)
37160 {
37161- static atomic_t serio_no = ATOMIC_INIT(0);
37162+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37163
37164 __module_get(THIS_MODULE);
37165
37166@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
37167 mutex_init(&serio->drv_mutex);
37168 device_initialize(&serio->dev);
37169 dev_set_name(&serio->dev, "serio%ld",
37170- (long)atomic_inc_return(&serio_no) - 1);
37171+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37172 serio->dev.bus = &serio_bus;
37173 serio->dev.release = serio_release_port;
37174 if (serio->parent) {
37175diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
37176index 33dcd8d..2783d25 100644
37177--- a/drivers/isdn/gigaset/common.c
37178+++ b/drivers/isdn/gigaset/common.c
37179@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
37180 cs->commands_pending = 0;
37181 cs->cur_at_seq = 0;
37182 cs->gotfwver = -1;
37183- cs->open_count = 0;
37184+ local_set(&cs->open_count, 0);
37185 cs->dev = NULL;
37186 cs->tty = NULL;
37187 cs->tty_dev = NULL;
37188diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
37189index a2f6125..6a70677 100644
37190--- a/drivers/isdn/gigaset/gigaset.h
37191+++ b/drivers/isdn/gigaset/gigaset.h
37192@@ -34,6 +34,7 @@
37193 #include <linux/tty_driver.h>
37194 #include <linux/list.h>
37195 #include <asm/atomic.h>
37196+#include <asm/local.h>
37197
37198 #define GIG_VERSION {0,5,0,0}
37199 #define GIG_COMPAT {0,4,0,0}
37200@@ -446,7 +447,7 @@ struct cardstate {
37201 spinlock_t cmdlock;
37202 unsigned curlen, cmdbytes;
37203
37204- unsigned open_count;
37205+ local_t open_count;
37206 struct tty_struct *tty;
37207 struct tasklet_struct if_wake_tasklet;
37208 unsigned control_state;
37209diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37210index b3065b8..c7e8cc9 100644
37211--- a/drivers/isdn/gigaset/interface.c
37212+++ b/drivers/isdn/gigaset/interface.c
37213@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37214 return -ERESTARTSYS; // FIXME -EINTR?
37215 tty->driver_data = cs;
37216
37217- ++cs->open_count;
37218-
37219- if (cs->open_count == 1) {
37220+ if (local_inc_return(&cs->open_count) == 1) {
37221 spin_lock_irqsave(&cs->lock, flags);
37222 cs->tty = tty;
37223 spin_unlock_irqrestore(&cs->lock, flags);
37224@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37225
37226 if (!cs->connected)
37227 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37228- else if (!cs->open_count)
37229+ else if (!local_read(&cs->open_count))
37230 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37231 else {
37232- if (!--cs->open_count) {
37233+ if (!local_dec_return(&cs->open_count)) {
37234 spin_lock_irqsave(&cs->lock, flags);
37235 cs->tty = NULL;
37236 spin_unlock_irqrestore(&cs->lock, flags);
37237@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
37238 if (!cs->connected) {
37239 gig_dbg(DEBUG_IF, "not connected");
37240 retval = -ENODEV;
37241- } else if (!cs->open_count)
37242+ } else if (!local_read(&cs->open_count))
37243 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37244 else {
37245 retval = 0;
37246@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
37247 if (!cs->connected) {
37248 gig_dbg(DEBUG_IF, "not connected");
37249 retval = -ENODEV;
37250- } else if (!cs->open_count)
37251+ } else if (!local_read(&cs->open_count))
37252 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37253 else if (cs->mstate != MS_LOCKED) {
37254 dev_warn(cs->dev, "can't write to unlocked device\n");
37255@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
37256 if (!cs->connected) {
37257 gig_dbg(DEBUG_IF, "not connected");
37258 retval = -ENODEV;
37259- } else if (!cs->open_count)
37260+ } else if (!local_read(&cs->open_count))
37261 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37262 else if (cs->mstate != MS_LOCKED) {
37263 dev_warn(cs->dev, "can't write to unlocked device\n");
37264@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
37265
37266 if (!cs->connected)
37267 gig_dbg(DEBUG_IF, "not connected");
37268- else if (!cs->open_count)
37269+ else if (!local_read(&cs->open_count))
37270 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37271 else if (cs->mstate != MS_LOCKED)
37272 dev_warn(cs->dev, "can't write to unlocked device\n");
37273@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
37274
37275 if (!cs->connected)
37276 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37277- else if (!cs->open_count)
37278+ else if (!local_read(&cs->open_count))
37279 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37280 else {
37281 //FIXME
37282@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
37283
37284 if (!cs->connected)
37285 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37286- else if (!cs->open_count)
37287+ else if (!local_read(&cs->open_count))
37288 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37289 else {
37290 //FIXME
37291@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
37292 goto out;
37293 }
37294
37295- if (!cs->open_count) {
37296+ if (!local_read(&cs->open_count)) {
37297 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37298 goto out;
37299 }
37300diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37301index a7c0083..62a7cb6 100644
37302--- a/drivers/isdn/hardware/avm/b1.c
37303+++ b/drivers/isdn/hardware/avm/b1.c
37304@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
37305 }
37306 if (left) {
37307 if (t4file->user) {
37308- if (copy_from_user(buf, dp, left))
37309+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37310 return -EFAULT;
37311 } else {
37312 memcpy(buf, dp, left);
37313@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
37314 }
37315 if (left) {
37316 if (config->user) {
37317- if (copy_from_user(buf, dp, left))
37318+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37319 return -EFAULT;
37320 } else {
37321 memcpy(buf, dp, left);
37322diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
37323index f130724..c373c68 100644
37324--- a/drivers/isdn/hardware/eicon/capidtmf.c
37325+++ b/drivers/isdn/hardware/eicon/capidtmf.c
37326@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
37327 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
37328 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
37329
37330+ pax_track_stack();
37331
37332 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
37333 {
37334diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
37335index 4d425c6..a9be6c4 100644
37336--- a/drivers/isdn/hardware/eicon/capifunc.c
37337+++ b/drivers/isdn/hardware/eicon/capifunc.c
37338@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
37339 IDI_SYNC_REQ req;
37340 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37341
37342+ pax_track_stack();
37343+
37344 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37345
37346 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37347diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
37348index 3029234..ef0d9e2 100644
37349--- a/drivers/isdn/hardware/eicon/diddfunc.c
37350+++ b/drivers/isdn/hardware/eicon/diddfunc.c
37351@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37352 IDI_SYNC_REQ req;
37353 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37354
37355+ pax_track_stack();
37356+
37357 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37358
37359 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37360diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
37361index d36a4c0..11e7d1a 100644
37362--- a/drivers/isdn/hardware/eicon/divasfunc.c
37363+++ b/drivers/isdn/hardware/eicon/divasfunc.c
37364@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37365 IDI_SYNC_REQ req;
37366 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37367
37368+ pax_track_stack();
37369+
37370 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37371
37372 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37373diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
37374index 85784a7..a19ca98 100644
37375--- a/drivers/isdn/hardware/eicon/divasync.h
37376+++ b/drivers/isdn/hardware/eicon/divasync.h
37377@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
37378 } diva_didd_add_adapter_t;
37379 typedef struct _diva_didd_remove_adapter {
37380 IDI_CALL p_request;
37381-} diva_didd_remove_adapter_t;
37382+} __no_const diva_didd_remove_adapter_t;
37383 typedef struct _diva_didd_read_adapter_array {
37384 void * buffer;
37385 dword length;
37386diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
37387index db87d51..7d09acf 100644
37388--- a/drivers/isdn/hardware/eicon/idifunc.c
37389+++ b/drivers/isdn/hardware/eicon/idifunc.c
37390@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37391 IDI_SYNC_REQ req;
37392 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37393
37394+ pax_track_stack();
37395+
37396 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37397
37398 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37399diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
37400index ae89fb8..0fab299 100644
37401--- a/drivers/isdn/hardware/eicon/message.c
37402+++ b/drivers/isdn/hardware/eicon/message.c
37403@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
37404 dword d;
37405 word w;
37406
37407+ pax_track_stack();
37408+
37409 a = plci->adapter;
37410 Id = ((word)plci->Id<<8)|a->Id;
37411 PUT_WORD(&SS_Ind[4],0x0000);
37412@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
37413 word j, n, w;
37414 dword d;
37415
37416+ pax_track_stack();
37417+
37418
37419 for(i=0;i<8;i++) bp_parms[i].length = 0;
37420 for(i=0;i<2;i++) global_config[i].length = 0;
37421@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
37422 const byte llc3[] = {4,3,2,2,6,6,0};
37423 const byte header[] = {0,2,3,3,0,0,0};
37424
37425+ pax_track_stack();
37426+
37427 for(i=0;i<8;i++) bp_parms[i].length = 0;
37428 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
37429 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
37430@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
37431 word appl_number_group_type[MAX_APPL];
37432 PLCI *auxplci;
37433
37434+ pax_track_stack();
37435+
37436 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
37437
37438 if(!a->group_optimization_enabled)
37439diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
37440index a564b75..f3cf8b5 100644
37441--- a/drivers/isdn/hardware/eicon/mntfunc.c
37442+++ b/drivers/isdn/hardware/eicon/mntfunc.c
37443@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37444 IDI_SYNC_REQ req;
37445 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37446
37447+ pax_track_stack();
37448+
37449 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37450
37451 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37452diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
37453index a3bd163..8956575 100644
37454--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
37455+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
37456@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
37457 typedef struct _diva_os_idi_adapter_interface {
37458 diva_init_card_proc_t cleanup_adapter_proc;
37459 diva_cmd_card_proc_t cmd_proc;
37460-} diva_os_idi_adapter_interface_t;
37461+} __no_const diva_os_idi_adapter_interface_t;
37462
37463 typedef struct _diva_os_xdi_adapter {
37464 struct list_head link;
37465diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
37466index adb1e8c..21b590b 100644
37467--- a/drivers/isdn/i4l/isdn_common.c
37468+++ b/drivers/isdn/i4l/isdn_common.c
37469@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
37470 } iocpar;
37471 void __user *argp = (void __user *)arg;
37472
37473+ pax_track_stack();
37474+
37475 #define name iocpar.name
37476 #define bname iocpar.bname
37477 #define iocts iocpar.iocts
37478diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
37479index 90b56ed..5ed3305 100644
37480--- a/drivers/isdn/i4l/isdn_net.c
37481+++ b/drivers/isdn/i4l/isdn_net.c
37482@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
37483 {
37484 isdn_net_local *lp = netdev_priv(dev);
37485 unsigned char *p;
37486- ushort len = 0;
37487+ int len = 0;
37488
37489 switch (lp->p_encap) {
37490 case ISDN_NET_ENCAP_ETHER:
37491diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37492index bf7997a..cf091db 100644
37493--- a/drivers/isdn/icn/icn.c
37494+++ b/drivers/isdn/icn/icn.c
37495@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
37496 if (count > len)
37497 count = len;
37498 if (user) {
37499- if (copy_from_user(msg, buf, count))
37500+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37501 return -EFAULT;
37502 } else
37503 memcpy(msg, buf, count);
37504diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
37505index feb0fa4..f76f830 100644
37506--- a/drivers/isdn/mISDN/socket.c
37507+++ b/drivers/isdn/mISDN/socket.c
37508@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37509 if (dev) {
37510 struct mISDN_devinfo di;
37511
37512+ memset(&di, 0, sizeof(di));
37513 di.id = dev->id;
37514 di.Dprotocols = dev->Dprotocols;
37515 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37516@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37517 if (dev) {
37518 struct mISDN_devinfo di;
37519
37520+ memset(&di, 0, sizeof(di));
37521 di.id = dev->id;
37522 di.Dprotocols = dev->Dprotocols;
37523 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37524diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
37525index 485be8b..f0225bc 100644
37526--- a/drivers/isdn/sc/interrupt.c
37527+++ b/drivers/isdn/sc/interrupt.c
37528@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37529 }
37530 else if(callid>=0x0000 && callid<=0x7FFF)
37531 {
37532+ int len;
37533+
37534 pr_debug("%s: Got Incoming Call\n",
37535 sc_adapter[card]->devicename);
37536- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
37537- strcpy(setup.eazmsn,
37538- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
37539+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
37540+ sizeof(setup.phone));
37541+ if (len >= sizeof(setup.phone))
37542+ continue;
37543+ len = strlcpy(setup.eazmsn,
37544+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37545+ sizeof(setup.eazmsn));
37546+ if (len >= sizeof(setup.eazmsn))
37547+ continue;
37548 setup.si1 = 7;
37549 setup.si2 = 0;
37550 setup.plan = 0;
37551@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37552 * Handle a GetMyNumber Rsp
37553 */
37554 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
37555- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
37556+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37557+ rcvmsg.msg_data.byte_array,
37558+ sizeof(rcvmsg.msg_data.byte_array));
37559 continue;
37560 }
37561
37562diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37563index 8744d24..d1f9a9a 100644
37564--- a/drivers/lguest/core.c
37565+++ b/drivers/lguest/core.c
37566@@ -91,9 +91,17 @@ static __init int map_switcher(void)
37567 * it's worked so far. The end address needs +1 because __get_vm_area
37568 * allocates an extra guard page, so we need space for that.
37569 */
37570+
37571+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37572+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37573+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37574+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37575+#else
37576 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37577 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37578 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37579+#endif
37580+
37581 if (!switcher_vma) {
37582 err = -ENOMEM;
37583 printk("lguest: could not map switcher pages high\n");
37584@@ -118,7 +126,7 @@ static __init int map_switcher(void)
37585 * Now the Switcher is mapped at the right address, we can't fail!
37586 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
37587 */
37588- memcpy(switcher_vma->addr, start_switcher_text,
37589+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37590 end_switcher_text - start_switcher_text);
37591
37592 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37593diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
37594index bd16323..ab460f7 100644
37595--- a/drivers/lguest/lguest_user.c
37596+++ b/drivers/lguest/lguest_user.c
37597@@ -194,6 +194,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
37598 * Once our Guest is initialized, the Launcher makes it run by reading
37599 * from /dev/lguest.
37600 */
37601+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
37602 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
37603 {
37604 struct lguest *lg = file->private_data;
37605diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37606index 6ae3888..8b38145 100644
37607--- a/drivers/lguest/x86/core.c
37608+++ b/drivers/lguest/x86/core.c
37609@@ -59,7 +59,7 @@ static struct {
37610 /* Offset from where switcher.S was compiled to where we've copied it */
37611 static unsigned long switcher_offset(void)
37612 {
37613- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37614+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37615 }
37616
37617 /* This cpu's struct lguest_pages. */
37618@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37619 * These copies are pretty cheap, so we do them unconditionally: */
37620 /* Save the current Host top-level page directory.
37621 */
37622+
37623+#ifdef CONFIG_PAX_PER_CPU_PGD
37624+ pages->state.host_cr3 = read_cr3();
37625+#else
37626 pages->state.host_cr3 = __pa(current->mm->pgd);
37627+#endif
37628+
37629 /*
37630 * Set up the Guest's page tables to see this CPU's pages (and no
37631 * other CPU's pages).
37632@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
37633 * compiled-in switcher code and the high-mapped copy we just made.
37634 */
37635 for (i = 0; i < IDT_ENTRIES; i++)
37636- default_idt_entries[i] += switcher_offset();
37637+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37638
37639 /*
37640 * Set up the Switcher's per-cpu areas.
37641@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
37642 * it will be undisturbed when we switch. To change %cs and jump we
37643 * need this structure to feed to Intel's "lcall" instruction.
37644 */
37645- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37646+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37647 lguest_entry.segment = LGUEST_CS;
37648
37649 /*
37650diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37651index 40634b0..4f5855e 100644
37652--- a/drivers/lguest/x86/switcher_32.S
37653+++ b/drivers/lguest/x86/switcher_32.S
37654@@ -87,6 +87,7 @@
37655 #include <asm/page.h>
37656 #include <asm/segment.h>
37657 #include <asm/lguest.h>
37658+#include <asm/processor-flags.h>
37659
37660 // We mark the start of the code to copy
37661 // It's placed in .text tho it's never run here
37662@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37663 // Changes type when we load it: damn Intel!
37664 // For after we switch over our page tables
37665 // That entry will be read-only: we'd crash.
37666+
37667+#ifdef CONFIG_PAX_KERNEXEC
37668+ mov %cr0, %edx
37669+ xor $X86_CR0_WP, %edx
37670+ mov %edx, %cr0
37671+#endif
37672+
37673 movl $(GDT_ENTRY_TSS*8), %edx
37674 ltr %dx
37675
37676@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37677 // Let's clear it again for our return.
37678 // The GDT descriptor of the Host
37679 // Points to the table after two "size" bytes
37680- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37681+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37682 // Clear "used" from type field (byte 5, bit 2)
37683- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37684+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37685+
37686+#ifdef CONFIG_PAX_KERNEXEC
37687+ mov %cr0, %eax
37688+ xor $X86_CR0_WP, %eax
37689+ mov %eax, %cr0
37690+#endif
37691
37692 // Once our page table's switched, the Guest is live!
37693 // The Host fades as we run this final step.
37694@@ -295,13 +309,12 @@ deliver_to_host:
37695 // I consulted gcc, and it gave
37696 // These instructions, which I gladly credit:
37697 leal (%edx,%ebx,8), %eax
37698- movzwl (%eax),%edx
37699- movl 4(%eax), %eax
37700- xorw %ax, %ax
37701- orl %eax, %edx
37702+ movl 4(%eax), %edx
37703+ movw (%eax), %dx
37704 // Now the address of the handler's in %edx
37705 // We call it now: its "iret" drops us home.
37706- jmp *%edx
37707+ ljmp $__KERNEL_CS, $1f
37708+1: jmp *%edx
37709
37710 // Every interrupt can come to us here
37711 // But we must truly tell each apart.
37712diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
37713index 588a5b0..b71db89 100644
37714--- a/drivers/macintosh/macio_asic.c
37715+++ b/drivers/macintosh/macio_asic.c
37716@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
37717 * MacIO is matched against any Apple ID, it's probe() function
37718 * will then decide wether it applies or not
37719 */
37720-static const struct pci_device_id __devinitdata pci_ids [] = { {
37721+static const struct pci_device_id __devinitconst pci_ids [] = { {
37722 .vendor = PCI_VENDOR_ID_APPLE,
37723 .device = PCI_ANY_ID,
37724 .subvendor = PCI_ANY_ID,
37725diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
37726index a348bb0..ecd9b3f 100644
37727--- a/drivers/macintosh/via-pmu-backlight.c
37728+++ b/drivers/macintosh/via-pmu-backlight.c
37729@@ -15,7 +15,7 @@
37730
37731 #define MAX_PMU_LEVEL 0xFF
37732
37733-static struct backlight_ops pmu_backlight_data;
37734+static const struct backlight_ops pmu_backlight_data;
37735 static DEFINE_SPINLOCK(pmu_backlight_lock);
37736 static int sleeping, uses_pmu_bl;
37737 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
37738@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
37739 return bd->props.brightness;
37740 }
37741
37742-static struct backlight_ops pmu_backlight_data = {
37743+static const struct backlight_ops pmu_backlight_data = {
37744 .get_brightness = pmu_backlight_get_brightness,
37745 .update_status = pmu_backlight_update_status,
37746
37747diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
37748index 6f308a4..b5f7ff7 100644
37749--- a/drivers/macintosh/via-pmu.c
37750+++ b/drivers/macintosh/via-pmu.c
37751@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
37752 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
37753 }
37754
37755-static struct platform_suspend_ops pmu_pm_ops = {
37756+static const struct platform_suspend_ops pmu_pm_ops = {
37757 .enter = powerbook_sleep,
37758 .valid = pmu_sleep_valid,
37759 };
37760diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37761index 818b617..4656e38 100644
37762--- a/drivers/md/dm-ioctl.c
37763+++ b/drivers/md/dm-ioctl.c
37764@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37765 cmd == DM_LIST_VERSIONS_CMD)
37766 return 0;
37767
37768- if ((cmd == DM_DEV_CREATE_CMD)) {
37769+ if (cmd == DM_DEV_CREATE_CMD) {
37770 if (!*param->name) {
37771 DMWARN("name not supplied when creating device");
37772 return -EINVAL;
37773diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37774index 6021d0a..a878643 100644
37775--- a/drivers/md/dm-raid1.c
37776+++ b/drivers/md/dm-raid1.c
37777@@ -41,7 +41,7 @@ enum dm_raid1_error {
37778
37779 struct mirror {
37780 struct mirror_set *ms;
37781- atomic_t error_count;
37782+ atomic_unchecked_t error_count;
37783 unsigned long error_type;
37784 struct dm_dev *dev;
37785 sector_t offset;
37786@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37787 * simple way to tell if a device has encountered
37788 * errors.
37789 */
37790- atomic_inc(&m->error_count);
37791+ atomic_inc_unchecked(&m->error_count);
37792
37793 if (test_and_set_bit(error_type, &m->error_type))
37794 return;
37795@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37796 }
37797
37798 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
37799- if (!atomic_read(&new->error_count)) {
37800+ if (!atomic_read_unchecked(&new->error_count)) {
37801 set_default_mirror(new);
37802 break;
37803 }
37804@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37805 struct mirror *m = get_default_mirror(ms);
37806
37807 do {
37808- if (likely(!atomic_read(&m->error_count)))
37809+ if (likely(!atomic_read_unchecked(&m->error_count)))
37810 return m;
37811
37812 if (m-- == ms->mirror)
37813@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
37814 {
37815 struct mirror *default_mirror = get_default_mirror(m->ms);
37816
37817- return !atomic_read(&default_mirror->error_count);
37818+ return !atomic_read_unchecked(&default_mirror->error_count);
37819 }
37820
37821 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37822@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37823 */
37824 if (likely(region_in_sync(ms, region, 1)))
37825 m = choose_mirror(ms, bio->bi_sector);
37826- else if (m && atomic_read(&m->error_count))
37827+ else if (m && atomic_read_unchecked(&m->error_count))
37828 m = NULL;
37829
37830 if (likely(m))
37831@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37832 }
37833
37834 ms->mirror[mirror].ms = ms;
37835- atomic_set(&(ms->mirror[mirror].error_count), 0);
37836+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37837 ms->mirror[mirror].error_type = 0;
37838 ms->mirror[mirror].offset = offset;
37839
37840@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
37841 */
37842 static char device_status_char(struct mirror *m)
37843 {
37844- if (!atomic_read(&(m->error_count)))
37845+ if (!atomic_read_unchecked(&(m->error_count)))
37846 return 'A';
37847
37848 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
37849diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37850index bd58703..9f26571 100644
37851--- a/drivers/md/dm-stripe.c
37852+++ b/drivers/md/dm-stripe.c
37853@@ -20,7 +20,7 @@ struct stripe {
37854 struct dm_dev *dev;
37855 sector_t physical_start;
37856
37857- atomic_t error_count;
37858+ atomic_unchecked_t error_count;
37859 };
37860
37861 struct stripe_c {
37862@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37863 kfree(sc);
37864 return r;
37865 }
37866- atomic_set(&(sc->stripe[i].error_count), 0);
37867+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37868 }
37869
37870 ti->private = sc;
37871@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
37872 DMEMIT("%d ", sc->stripes);
37873 for (i = 0; i < sc->stripes; i++) {
37874 DMEMIT("%s ", sc->stripe[i].dev->name);
37875- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37876+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37877 'D' : 'A';
37878 }
37879 buffer[i] = '\0';
37880@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
37881 */
37882 for (i = 0; i < sc->stripes; i++)
37883 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37884- atomic_inc(&(sc->stripe[i].error_count));
37885- if (atomic_read(&(sc->stripe[i].error_count)) <
37886+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37887+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37888 DM_IO_ERROR_THRESHOLD)
37889 queue_work(kstriped, &sc->kstriped_ws);
37890 }
37891diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
37892index 4b04590..13a77b2 100644
37893--- a/drivers/md/dm-sysfs.c
37894+++ b/drivers/md/dm-sysfs.c
37895@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
37896 NULL,
37897 };
37898
37899-static struct sysfs_ops dm_sysfs_ops = {
37900+static const struct sysfs_ops dm_sysfs_ops = {
37901 .show = dm_attr_show,
37902 };
37903
37904diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37905index 03345bb..332250d 100644
37906--- a/drivers/md/dm-table.c
37907+++ b/drivers/md/dm-table.c
37908@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37909 if (!dev_size)
37910 return 0;
37911
37912- if ((start >= dev_size) || (start + len > dev_size)) {
37913+ if ((start >= dev_size) || (len > dev_size - start)) {
37914 DMWARN("%s: %s too small for target: "
37915 "start=%llu, len=%llu, dev_size=%llu",
37916 dm_device_name(ti->table->md), bdevname(bdev, b),
37917diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37918index c988ac2..c418141 100644
37919--- a/drivers/md/dm.c
37920+++ b/drivers/md/dm.c
37921@@ -165,9 +165,9 @@ struct mapped_device {
37922 /*
37923 * Event handling.
37924 */
37925- atomic_t event_nr;
37926+ atomic_unchecked_t event_nr;
37927 wait_queue_head_t eventq;
37928- atomic_t uevent_seq;
37929+ atomic_unchecked_t uevent_seq;
37930 struct list_head uevent_list;
37931 spinlock_t uevent_lock; /* Protect access to uevent_list */
37932
37933@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
37934 rwlock_init(&md->map_lock);
37935 atomic_set(&md->holders, 1);
37936 atomic_set(&md->open_count, 0);
37937- atomic_set(&md->event_nr, 0);
37938- atomic_set(&md->uevent_seq, 0);
37939+ atomic_set_unchecked(&md->event_nr, 0);
37940+ atomic_set_unchecked(&md->uevent_seq, 0);
37941 INIT_LIST_HEAD(&md->uevent_list);
37942 spin_lock_init(&md->uevent_lock);
37943
37944@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
37945
37946 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37947
37948- atomic_inc(&md->event_nr);
37949+ atomic_inc_unchecked(&md->event_nr);
37950 wake_up(&md->eventq);
37951 }
37952
37953@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37954
37955 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37956 {
37957- return atomic_add_return(1, &md->uevent_seq);
37958+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37959 }
37960
37961 uint32_t dm_get_event_nr(struct mapped_device *md)
37962 {
37963- return atomic_read(&md->event_nr);
37964+ return atomic_read_unchecked(&md->event_nr);
37965 }
37966
37967 int dm_wait_event(struct mapped_device *md, int event_nr)
37968 {
37969 return wait_event_interruptible(md->eventq,
37970- (event_nr != atomic_read(&md->event_nr)));
37971+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37972 }
37973
37974 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37975diff --git a/drivers/md/md.c b/drivers/md/md.c
37976index 4ce6e2f..7a9530a 100644
37977--- a/drivers/md/md.c
37978+++ b/drivers/md/md.c
37979@@ -153,10 +153,10 @@ static int start_readonly;
37980 * start build, activate spare
37981 */
37982 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37983-static atomic_t md_event_count;
37984+static atomic_unchecked_t md_event_count;
37985 void md_new_event(mddev_t *mddev)
37986 {
37987- atomic_inc(&md_event_count);
37988+ atomic_inc_unchecked(&md_event_count);
37989 wake_up(&md_event_waiters);
37990 }
37991 EXPORT_SYMBOL_GPL(md_new_event);
37992@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37993 */
37994 static void md_new_event_inintr(mddev_t *mddev)
37995 {
37996- atomic_inc(&md_event_count);
37997+ atomic_inc_unchecked(&md_event_count);
37998 wake_up(&md_event_waiters);
37999 }
38000
38001@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
38002
38003 rdev->preferred_minor = 0xffff;
38004 rdev->data_offset = le64_to_cpu(sb->data_offset);
38005- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38006+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38007
38008 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38009 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38010@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
38011 else
38012 sb->resync_offset = cpu_to_le64(0);
38013
38014- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38015+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38016
38017 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38018 sb->size = cpu_to_le64(mddev->dev_sectors);
38019@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38020 static ssize_t
38021 errors_show(mdk_rdev_t *rdev, char *page)
38022 {
38023- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38024+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38025 }
38026
38027 static ssize_t
38028@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
38029 char *e;
38030 unsigned long n = simple_strtoul(buf, &e, 10);
38031 if (*buf && (*e == 0 || *e == '\n')) {
38032- atomic_set(&rdev->corrected_errors, n);
38033+ atomic_set_unchecked(&rdev->corrected_errors, n);
38034 return len;
38035 }
38036 return -EINVAL;
38037@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
38038 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
38039 kfree(rdev);
38040 }
38041-static struct sysfs_ops rdev_sysfs_ops = {
38042+static const struct sysfs_ops rdev_sysfs_ops = {
38043 .show = rdev_attr_show,
38044 .store = rdev_attr_store,
38045 };
38046@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
38047 rdev->data_offset = 0;
38048 rdev->sb_events = 0;
38049 atomic_set(&rdev->nr_pending, 0);
38050- atomic_set(&rdev->read_errors, 0);
38051- atomic_set(&rdev->corrected_errors, 0);
38052+ atomic_set_unchecked(&rdev->read_errors, 0);
38053+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38054
38055 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
38056 if (!size) {
38057@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
38058 kfree(mddev);
38059 }
38060
38061-static struct sysfs_ops md_sysfs_ops = {
38062+static const struct sysfs_ops md_sysfs_ops = {
38063 .show = md_attr_show,
38064 .store = md_attr_store,
38065 };
38066@@ -4482,7 +4482,8 @@ out:
38067 err = 0;
38068 blk_integrity_unregister(disk);
38069 md_new_event(mddev);
38070- sysfs_notify_dirent(mddev->sysfs_state);
38071+ if (mddev->sysfs_state)
38072+ sysfs_notify_dirent(mddev->sysfs_state);
38073 return err;
38074 }
38075
38076@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38077
38078 spin_unlock(&pers_lock);
38079 seq_printf(seq, "\n");
38080- mi->event = atomic_read(&md_event_count);
38081+ mi->event = atomic_read_unchecked(&md_event_count);
38082 return 0;
38083 }
38084 if (v == (void*)2) {
38085@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38086 chunk_kb ? "KB" : "B");
38087 if (bitmap->file) {
38088 seq_printf(seq, ", file: ");
38089- seq_path(seq, &bitmap->file->f_path, " \t\n");
38090+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
38091 }
38092
38093 seq_printf(seq, "\n");
38094@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38095 else {
38096 struct seq_file *p = file->private_data;
38097 p->private = mi;
38098- mi->event = atomic_read(&md_event_count);
38099+ mi->event = atomic_read_unchecked(&md_event_count);
38100 }
38101 return error;
38102 }
38103@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38104 /* always allow read */
38105 mask = POLLIN | POLLRDNORM;
38106
38107- if (mi->event != atomic_read(&md_event_count))
38108+ if (mi->event != atomic_read_unchecked(&md_event_count))
38109 mask |= POLLERR | POLLPRI;
38110 return mask;
38111 }
38112@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
38113 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38114 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38115 (int)part_stat_read(&disk->part0, sectors[1]) -
38116- atomic_read(&disk->sync_io);
38117+ atomic_read_unchecked(&disk->sync_io);
38118 /* sync IO will cause sync_io to increase before the disk_stats
38119 * as sync_io is counted when a request starts, and
38120 * disk_stats is counted when it completes.
38121diff --git a/drivers/md/md.h b/drivers/md/md.h
38122index 87430fe..0024a4c 100644
38123--- a/drivers/md/md.h
38124+++ b/drivers/md/md.h
38125@@ -94,10 +94,10 @@ struct mdk_rdev_s
38126 * only maintained for arrays that
38127 * support hot removal
38128 */
38129- atomic_t read_errors; /* number of consecutive read errors that
38130+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38131 * we have tried to ignore.
38132 */
38133- atomic_t corrected_errors; /* number of corrected read errors,
38134+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38135 * for reporting to userspace and storing
38136 * in superblock.
38137 */
38138@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
38139
38140 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38141 {
38142- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38143+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38144 }
38145
38146 struct mdk_personality
38147diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38148index 968cb14..f0ad2e4 100644
38149--- a/drivers/md/raid1.c
38150+++ b/drivers/md/raid1.c
38151@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
38152 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
38153 continue;
38154 rdev = conf->mirrors[d].rdev;
38155- atomic_add(s, &rdev->corrected_errors);
38156+ atomic_add_unchecked(s, &rdev->corrected_errors);
38157 if (sync_page_io(rdev->bdev,
38158 sect + rdev->data_offset,
38159 s<<9,
38160@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
38161 /* Well, this device is dead */
38162 md_error(mddev, rdev);
38163 else {
38164- atomic_add(s, &rdev->corrected_errors);
38165+ atomic_add_unchecked(s, &rdev->corrected_errors);
38166 printk(KERN_INFO
38167 "raid1:%s: read error corrected "
38168 "(%d sectors at %llu on %s)\n",
38169diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38170index 1b4e232..cf0f534b 100644
38171--- a/drivers/md/raid10.c
38172+++ b/drivers/md/raid10.c
38173@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
38174 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
38175 set_bit(R10BIO_Uptodate, &r10_bio->state);
38176 else {
38177- atomic_add(r10_bio->sectors,
38178+ atomic_add_unchecked(r10_bio->sectors,
38179 &conf->mirrors[d].rdev->corrected_errors);
38180 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
38181 md_error(r10_bio->mddev,
38182@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
38183 test_bit(In_sync, &rdev->flags)) {
38184 atomic_inc(&rdev->nr_pending);
38185 rcu_read_unlock();
38186- atomic_add(s, &rdev->corrected_errors);
38187+ atomic_add_unchecked(s, &rdev->corrected_errors);
38188 if (sync_page_io(rdev->bdev,
38189 r10_bio->devs[sl].addr +
38190 sect + rdev->data_offset,
38191diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38192index 883215d..675bf47 100644
38193--- a/drivers/md/raid5.c
38194+++ b/drivers/md/raid5.c
38195@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
38196 bi->bi_next = NULL;
38197 if ((rw & WRITE) &&
38198 test_bit(R5_ReWrite, &sh->dev[i].flags))
38199- atomic_add(STRIPE_SECTORS,
38200+ atomic_add_unchecked(STRIPE_SECTORS,
38201 &rdev->corrected_errors);
38202 generic_make_request(bi);
38203 } else {
38204@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
38205 clear_bit(R5_ReadError, &sh->dev[i].flags);
38206 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38207 }
38208- if (atomic_read(&conf->disks[i].rdev->read_errors))
38209- atomic_set(&conf->disks[i].rdev->read_errors, 0);
38210+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
38211+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
38212 } else {
38213 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
38214 int retry = 0;
38215 rdev = conf->disks[i].rdev;
38216
38217 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38218- atomic_inc(&rdev->read_errors);
38219+ atomic_inc_unchecked(&rdev->read_errors);
38220 if (conf->mddev->degraded >= conf->max_degraded)
38221 printk_rl(KERN_WARNING
38222 "raid5:%s: read error not correctable "
38223@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38224 (unsigned long long)(sh->sector
38225 + rdev->data_offset),
38226 bdn);
38227- else if (atomic_read(&rdev->read_errors)
38228+ else if (atomic_read_unchecked(&rdev->read_errors)
38229 > conf->max_nr_stripes)
38230 printk(KERN_WARNING
38231 "raid5:%s: Too many read errors, failing device %s.\n",
38232@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
38233 sector_t r_sector;
38234 struct stripe_head sh2;
38235
38236+ pax_track_stack();
38237
38238 chunk_offset = sector_div(new_sector, sectors_per_chunk);
38239 stripe = new_sector;
38240diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
38241index 05bde9c..2f31d40 100644
38242--- a/drivers/media/common/saa7146_hlp.c
38243+++ b/drivers/media/common/saa7146_hlp.c
38244@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
38245
38246 int x[32], y[32], w[32], h[32];
38247
38248+ pax_track_stack();
38249+
38250 /* clear out memory */
38251 memset(&line_list[0], 0x00, sizeof(u32)*32);
38252 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
38253diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38254index cb22da5..82b686e 100644
38255--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38256+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38257@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
38258 u8 buf[HOST_LINK_BUF_SIZE];
38259 int i;
38260
38261+ pax_track_stack();
38262+
38263 dprintk("%s\n", __func__);
38264
38265 /* check if we have space for a link buf in the rx_buffer */
38266@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
38267 unsigned long timeout;
38268 int written;
38269
38270+ pax_track_stack();
38271+
38272 dprintk("%s\n", __func__);
38273
38274 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
38275diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
38276index 2fe05d0..a3289c4 100644
38277--- a/drivers/media/dvb/dvb-core/dvb_demux.h
38278+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
38279@@ -71,7 +71,7 @@ struct dvb_demux_feed {
38280 union {
38281 dmx_ts_cb ts;
38282 dmx_section_cb sec;
38283- } cb;
38284+ } __no_const cb;
38285
38286 struct dvb_demux *demux;
38287 void *priv;
38288diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
38289index 94159b9..376bd8e 100644
38290--- a/drivers/media/dvb/dvb-core/dvbdev.c
38291+++ b/drivers/media/dvb/dvb-core/dvbdev.c
38292@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38293 const struct dvb_device *template, void *priv, int type)
38294 {
38295 struct dvb_device *dvbdev;
38296- struct file_operations *dvbdevfops;
38297+ file_operations_no_const *dvbdevfops;
38298 struct device *clsdev;
38299 int minor;
38300 int id;
38301diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
38302index 2a53dd0..db8c07a 100644
38303--- a/drivers/media/dvb/dvb-usb/cxusb.c
38304+++ b/drivers/media/dvb/dvb-usb/cxusb.c
38305@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38306 struct dib0700_adapter_state {
38307 int (*set_param_save) (struct dvb_frontend *,
38308 struct dvb_frontend_parameters *);
38309-};
38310+} __no_const;
38311
38312 static int dib7070_set_param_override(struct dvb_frontend *fe,
38313 struct dvb_frontend_parameters *fep)
38314diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
38315index db7f7f7..f55e96f 100644
38316--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
38317+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
38318@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
38319
38320 u8 buf[260];
38321
38322+ pax_track_stack();
38323+
38324 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
38325 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
38326
38327diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38328index 524acf5..5ffc403 100644
38329--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
38330+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38331@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
38332
38333 struct dib0700_adapter_state {
38334 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
38335-};
38336+} __no_const;
38337
38338 /* Hauppauge Nova-T 500 (aka Bristol)
38339 * has a LNA on GPIO0 which is enabled by setting 1 */
38340diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
38341index ba91735..4261d84 100644
38342--- a/drivers/media/dvb/frontends/dib3000.h
38343+++ b/drivers/media/dvb/frontends/dib3000.h
38344@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38345 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38346 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38347 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38348-};
38349+} __no_const;
38350
38351 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38352 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38353diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
38354index c709ce6..b3fe620 100644
38355--- a/drivers/media/dvb/frontends/or51211.c
38356+++ b/drivers/media/dvb/frontends/or51211.c
38357@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
38358 u8 tudata[585];
38359 int i;
38360
38361+ pax_track_stack();
38362+
38363 dprintk("Firmware is %zd bytes\n",fw->size);
38364
38365 /* Get eprom data */
38366diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38367index 482d0f3..ee1e202 100644
38368--- a/drivers/media/radio/radio-cadet.c
38369+++ b/drivers/media/radio/radio-cadet.c
38370@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38371 while (i < count && dev->rdsin != dev->rdsout)
38372 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38373
38374- if (copy_to_user(data, readbuf, i))
38375+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
38376 return -EFAULT;
38377 return i;
38378 }
38379diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
38380index 6dd51e2..0359b92 100644
38381--- a/drivers/media/video/cx18/cx18-driver.c
38382+++ b/drivers/media/video/cx18/cx18-driver.c
38383@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
38384
38385 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
38386
38387-static atomic_t cx18_instance = ATOMIC_INIT(0);
38388+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
38389
38390 /* Parameter declarations */
38391 static int cardtype[CX18_MAX_CARDS];
38392@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
38393 struct i2c_client c;
38394 u8 eedata[256];
38395
38396+ pax_track_stack();
38397+
38398 memset(&c, 0, sizeof(c));
38399 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
38400 c.adapter = &cx->i2c_adap[0];
38401@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
38402 struct cx18 *cx;
38403
38404 /* FIXME - module parameter arrays constrain max instances */
38405- i = atomic_inc_return(&cx18_instance) - 1;
38406+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
38407 if (i >= CX18_MAX_CARDS) {
38408 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
38409 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
38410diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
38411index 463ec34..2f4625a 100644
38412--- a/drivers/media/video/ivtv/ivtv-driver.c
38413+++ b/drivers/media/video/ivtv/ivtv-driver.c
38414@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
38415 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
38416
38417 /* ivtv instance counter */
38418-static atomic_t ivtv_instance = ATOMIC_INIT(0);
38419+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
38420
38421 /* Parameter declarations */
38422 static int cardtype[IVTV_MAX_CARDS];
38423diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
38424index 5fc4ac0..652a54a 100644
38425--- a/drivers/media/video/omap24xxcam.c
38426+++ b/drivers/media/video/omap24xxcam.c
38427@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
38428 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
38429
38430 do_gettimeofday(&vb->ts);
38431- vb->field_count = atomic_add_return(2, &fh->field_count);
38432+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
38433 if (csr & csr_error) {
38434 vb->state = VIDEOBUF_ERROR;
38435 if (!atomic_read(&fh->cam->in_reset)) {
38436diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
38437index 2ce67f5..cf26a5b 100644
38438--- a/drivers/media/video/omap24xxcam.h
38439+++ b/drivers/media/video/omap24xxcam.h
38440@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
38441 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
38442 struct videobuf_queue vbq;
38443 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
38444- atomic_t field_count; /* field counter for videobuf_buffer */
38445+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
38446 /* accessing cam here doesn't need serialisation: it's constant */
38447 struct omap24xxcam_device *cam;
38448 };
38449diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38450index 299afa4..eb47459 100644
38451--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38452+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38453@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
38454 u8 *eeprom;
38455 struct tveeprom tvdata;
38456
38457+ pax_track_stack();
38458+
38459 memset(&tvdata,0,sizeof(tvdata));
38460
38461 eeprom = pvr2_eeprom_fetch(hdw);
38462diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38463index 5b152ff..3320638 100644
38464--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38465+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38466@@ -195,7 +195,7 @@ struct pvr2_hdw {
38467
38468 /* I2C stuff */
38469 struct i2c_adapter i2c_adap;
38470- struct i2c_algorithm i2c_algo;
38471+ i2c_algorithm_no_const i2c_algo;
38472 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
38473 int i2c_cx25840_hack_state;
38474 int i2c_linked;
38475diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
38476index 1eabff6..8e2313a 100644
38477--- a/drivers/media/video/saa7134/saa6752hs.c
38478+++ b/drivers/media/video/saa7134/saa6752hs.c
38479@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
38480 unsigned char localPAT[256];
38481 unsigned char localPMT[256];
38482
38483+ pax_track_stack();
38484+
38485 /* Set video format - must be done first as it resets other settings */
38486 set_reg8(client, 0x41, h->video_format);
38487
38488diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
38489index 9c1d3ac..b1b49e9 100644
38490--- a/drivers/media/video/saa7164/saa7164-cmd.c
38491+++ b/drivers/media/video/saa7164/saa7164-cmd.c
38492@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
38493 wait_queue_head_t *q = 0;
38494 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38495
38496+ pax_track_stack();
38497+
38498 /* While any outstand message on the bus exists... */
38499 do {
38500
38501@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
38502 u8 tmp[512];
38503 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38504
38505+ pax_track_stack();
38506+
38507 while (loop) {
38508
38509 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
38510diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
38511index b085496..cde0270 100644
38512--- a/drivers/media/video/usbvideo/ibmcam.c
38513+++ b/drivers/media/video/usbvideo/ibmcam.c
38514@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
38515 static int __init ibmcam_init(void)
38516 {
38517 struct usbvideo_cb cbTbl;
38518- memset(&cbTbl, 0, sizeof(cbTbl));
38519- cbTbl.probe = ibmcam_probe;
38520- cbTbl.setupOnOpen = ibmcam_setup_on_open;
38521- cbTbl.videoStart = ibmcam_video_start;
38522- cbTbl.videoStop = ibmcam_video_stop;
38523- cbTbl.processData = ibmcam_ProcessIsocData;
38524- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38525- cbTbl.adjustPicture = ibmcam_adjust_picture;
38526- cbTbl.getFPS = ibmcam_calculate_fps;
38527+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
38528+ *(void **)&cbTbl.probe = ibmcam_probe;
38529+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
38530+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
38531+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
38532+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
38533+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38534+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
38535+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
38536 return usbvideo_register(
38537 &cams,
38538 MAX_IBMCAM,
38539diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
38540index 31d57f2..600b735 100644
38541--- a/drivers/media/video/usbvideo/konicawc.c
38542+++ b/drivers/media/video/usbvideo/konicawc.c
38543@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
38544 int error;
38545
38546 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38547- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38548+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38549
38550 cam->input = input_dev = input_allocate_device();
38551 if (!input_dev) {
38552@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
38553 struct usbvideo_cb cbTbl;
38554 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
38555 DRIVER_DESC "\n");
38556- memset(&cbTbl, 0, sizeof(cbTbl));
38557- cbTbl.probe = konicawc_probe;
38558- cbTbl.setupOnOpen = konicawc_setup_on_open;
38559- cbTbl.processData = konicawc_process_isoc;
38560- cbTbl.getFPS = konicawc_calculate_fps;
38561- cbTbl.setVideoMode = konicawc_set_video_mode;
38562- cbTbl.startDataPump = konicawc_start_data;
38563- cbTbl.stopDataPump = konicawc_stop_data;
38564- cbTbl.adjustPicture = konicawc_adjust_picture;
38565- cbTbl.userFree = konicawc_free_uvd;
38566+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
38567+ *(void **)&cbTbl.probe = konicawc_probe;
38568+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
38569+ *(void **)&cbTbl.processData = konicawc_process_isoc;
38570+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
38571+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
38572+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
38573+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
38574+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
38575+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
38576 return usbvideo_register(
38577 &cams,
38578 MAX_CAMERAS,
38579diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
38580index 803d3e4..c4d1b96 100644
38581--- a/drivers/media/video/usbvideo/quickcam_messenger.c
38582+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
38583@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
38584 int error;
38585
38586 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38587- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38588+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38589
38590 cam->input = input_dev = input_allocate_device();
38591 if (!input_dev) {
38592diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
38593index fbd1b63..292f9f0 100644
38594--- a/drivers/media/video/usbvideo/ultracam.c
38595+++ b/drivers/media/video/usbvideo/ultracam.c
38596@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
38597 {
38598 struct usbvideo_cb cbTbl;
38599 memset(&cbTbl, 0, sizeof(cbTbl));
38600- cbTbl.probe = ultracam_probe;
38601- cbTbl.setupOnOpen = ultracam_setup_on_open;
38602- cbTbl.videoStart = ultracam_video_start;
38603- cbTbl.videoStop = ultracam_video_stop;
38604- cbTbl.processData = ultracam_ProcessIsocData;
38605- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38606- cbTbl.adjustPicture = ultracam_adjust_picture;
38607- cbTbl.getFPS = ultracam_calculate_fps;
38608+ *(void **)&cbTbl.probe = ultracam_probe;
38609+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
38610+ *(void **)&cbTbl.videoStart = ultracam_video_start;
38611+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
38612+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
38613+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38614+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
38615+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
38616 return usbvideo_register(
38617 &cams,
38618 MAX_CAMERAS,
38619diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
38620index dea8b32..34f6878 100644
38621--- a/drivers/media/video/usbvideo/usbvideo.c
38622+++ b/drivers/media/video/usbvideo/usbvideo.c
38623@@ -697,15 +697,15 @@ int usbvideo_register(
38624 __func__, cams, base_size, num_cams);
38625
38626 /* Copy callbacks, apply defaults for those that are not set */
38627- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
38628+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
38629 if (cams->cb.getFrame == NULL)
38630- cams->cb.getFrame = usbvideo_GetFrame;
38631+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
38632 if (cams->cb.disconnect == NULL)
38633- cams->cb.disconnect = usbvideo_Disconnect;
38634+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
38635 if (cams->cb.startDataPump == NULL)
38636- cams->cb.startDataPump = usbvideo_StartDataPump;
38637+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
38638 if (cams->cb.stopDataPump == NULL)
38639- cams->cb.stopDataPump = usbvideo_StopDataPump;
38640+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
38641
38642 cams->num_cameras = num_cams;
38643 cams->cam = (struct uvd *) &cams[1];
38644diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
38645index c66985b..7fa143a 100644
38646--- a/drivers/media/video/usbvideo/usbvideo.h
38647+++ b/drivers/media/video/usbvideo/usbvideo.h
38648@@ -268,7 +268,7 @@ struct usbvideo_cb {
38649 int (*startDataPump)(struct uvd *uvd);
38650 void (*stopDataPump)(struct uvd *uvd);
38651 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
38652-};
38653+} __no_const;
38654
38655 struct usbvideo {
38656 int num_cameras; /* As allocated */
38657diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
38658index e0f91e4..37554ea 100644
38659--- a/drivers/media/video/usbvision/usbvision-core.c
38660+++ b/drivers/media/video/usbvision/usbvision-core.c
38661@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
38662 unsigned char rv, gv, bv;
38663 static unsigned char *Y, *U, *V;
38664
38665+ pax_track_stack();
38666+
38667 frame = usbvision->curFrame;
38668 imageSize = frame->frmwidth * frame->frmheight;
38669 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
38670diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
38671index 0d06e7c..3d17d24 100644
38672--- a/drivers/media/video/v4l2-device.c
38673+++ b/drivers/media/video/v4l2-device.c
38674@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38675 EXPORT_SYMBOL_GPL(v4l2_device_register);
38676
38677 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
38678- atomic_t *instance)
38679+ atomic_unchecked_t *instance)
38680 {
38681- int num = atomic_inc_return(instance) - 1;
38682+ int num = atomic_inc_return_unchecked(instance) - 1;
38683 int len = strlen(basename);
38684
38685 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
38686diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
38687index 032ebae..4ebd8e8 100644
38688--- a/drivers/media/video/videobuf-dma-sg.c
38689+++ b/drivers/media/video/videobuf-dma-sg.c
38690@@ -631,6 +631,9 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
38691
38692 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38693 char __user *data, size_t count,
38694+ int nonblocking ) __size_overflow(3);
38695+static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38696+ char __user *data, size_t count,
38697 int nonblocking )
38698 {
38699 struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
38700@@ -693,6 +696,8 @@ void *videobuf_sg_alloc(size_t size)
38701 {
38702 struct videobuf_queue q;
38703
38704+ pax_track_stack();
38705+
38706 /* Required to make generic handler to call __videobuf_alloc */
38707 q.int_ops = &sg_ops;
38708
38709diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
38710index 35f3900..aa7c2f1 100644
38711--- a/drivers/media/video/videobuf-vmalloc.c
38712+++ b/drivers/media/video/videobuf-vmalloc.c
38713@@ -330,6 +330,9 @@ error:
38714
38715 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38716 char __user *data, size_t count,
38717+ int nonblocking ) __size_overflow(3);
38718+static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38719+ char __user *data, size_t count,
38720 int nonblocking )
38721 {
38722 struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
38723diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38724index b6992b7..9fa7547 100644
38725--- a/drivers/message/fusion/mptbase.c
38726+++ b/drivers/message/fusion/mptbase.c
38727@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
38728 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38729 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38730
38731+#ifdef CONFIG_GRKERNSEC_HIDESYM
38732+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38733+ NULL, NULL);
38734+#else
38735 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38736 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38737+#endif
38738+
38739 /*
38740 * Rounding UP to nearest 4-kB boundary here...
38741 */
38742diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38743index 83873e3..e360e9a 100644
38744--- a/drivers/message/fusion/mptsas.c
38745+++ b/drivers/message/fusion/mptsas.c
38746@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38747 return 0;
38748 }
38749
38750+static inline void
38751+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38752+{
38753+ if (phy_info->port_details) {
38754+ phy_info->port_details->rphy = rphy;
38755+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38756+ ioc->name, rphy));
38757+ }
38758+
38759+ if (rphy) {
38760+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38761+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38762+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38763+ ioc->name, rphy, rphy->dev.release));
38764+ }
38765+}
38766+
38767 /* no mutex */
38768 static void
38769 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38770@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38771 return NULL;
38772 }
38773
38774-static inline void
38775-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38776-{
38777- if (phy_info->port_details) {
38778- phy_info->port_details->rphy = rphy;
38779- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38780- ioc->name, rphy));
38781- }
38782-
38783- if (rphy) {
38784- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38785- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38786- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38787- ioc->name, rphy, rphy->dev.release));
38788- }
38789-}
38790-
38791 static inline struct sas_port *
38792 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38793 {
38794diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38795index bd096ca..332cf76 100644
38796--- a/drivers/message/fusion/mptscsih.c
38797+++ b/drivers/message/fusion/mptscsih.c
38798@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38799
38800 h = shost_priv(SChost);
38801
38802- if (h) {
38803- if (h->info_kbuf == NULL)
38804- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38805- return h->info_kbuf;
38806- h->info_kbuf[0] = '\0';
38807+ if (!h)
38808+ return NULL;
38809
38810- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38811- h->info_kbuf[size-1] = '\0';
38812- }
38813+ if (h->info_kbuf == NULL)
38814+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38815+ return h->info_kbuf;
38816+ h->info_kbuf[0] = '\0';
38817+
38818+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38819+ h->info_kbuf[size-1] = '\0';
38820
38821 return h->info_kbuf;
38822 }
38823diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
38824index efba702..59b2c0f 100644
38825--- a/drivers/message/i2o/i2o_config.c
38826+++ b/drivers/message/i2o/i2o_config.c
38827@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
38828 struct i2o_message *msg;
38829 unsigned int iop;
38830
38831+ pax_track_stack();
38832+
38833 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
38834 return -EFAULT;
38835
38836diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38837index 7045c45..c07b170 100644
38838--- a/drivers/message/i2o/i2o_proc.c
38839+++ b/drivers/message/i2o/i2o_proc.c
38840@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
38841 "Array Controller Device"
38842 };
38843
38844-static char *chtostr(u8 * chars, int n)
38845-{
38846- char tmp[256];
38847- tmp[0] = 0;
38848- return strncat(tmp, (char *)chars, n);
38849-}
38850-
38851 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38852 char *group)
38853 {
38854@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38855
38856 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38857 seq_printf(seq, "%-#8x", ddm_table.module_id);
38858- seq_printf(seq, "%-29s",
38859- chtostr(ddm_table.module_name_version, 28));
38860+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38861 seq_printf(seq, "%9d ", ddm_table.data_size);
38862 seq_printf(seq, "%8d", ddm_table.code_size);
38863
38864@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38865
38866 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38867 seq_printf(seq, "%-#8x", dst->module_id);
38868- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
38869- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
38870+ seq_printf(seq, "%-.28s", dst->module_name_version);
38871+ seq_printf(seq, "%-.8s", dst->date);
38872 seq_printf(seq, "%8d ", dst->module_size);
38873 seq_printf(seq, "%8d ", dst->mpb_size);
38874 seq_printf(seq, "0x%04x", dst->module_flags);
38875@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38876 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38877 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38878 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38879- seq_printf(seq, "Vendor info : %s\n",
38880- chtostr((u8 *) (work32 + 2), 16));
38881- seq_printf(seq, "Product info : %s\n",
38882- chtostr((u8 *) (work32 + 6), 16));
38883- seq_printf(seq, "Description : %s\n",
38884- chtostr((u8 *) (work32 + 10), 16));
38885- seq_printf(seq, "Product rev. : %s\n",
38886- chtostr((u8 *) (work32 + 14), 8));
38887+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38888+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38889+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38890+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38891
38892 seq_printf(seq, "Serial number : ");
38893 print_serial_number(seq, (u8 *) (work32 + 16),
38894@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38895 }
38896
38897 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38898- seq_printf(seq, "Module name : %s\n",
38899- chtostr(result.module_name, 24));
38900- seq_printf(seq, "Module revision : %s\n",
38901- chtostr(result.module_rev, 8));
38902+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38903+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38904
38905 seq_printf(seq, "Serial number : ");
38906 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38907@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38908 return 0;
38909 }
38910
38911- seq_printf(seq, "Device name : %s\n",
38912- chtostr(result.device_name, 64));
38913- seq_printf(seq, "Service name : %s\n",
38914- chtostr(result.service_name, 64));
38915- seq_printf(seq, "Physical name : %s\n",
38916- chtostr(result.physical_location, 64));
38917- seq_printf(seq, "Instance number : %s\n",
38918- chtostr(result.instance_number, 4));
38919+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38920+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38921+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38922+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38923
38924 return 0;
38925 }
38926diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38927index 27cf4af..b1205b8 100644
38928--- a/drivers/message/i2o/iop.c
38929+++ b/drivers/message/i2o/iop.c
38930@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38931
38932 spin_lock_irqsave(&c->context_list_lock, flags);
38933
38934- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38935- atomic_inc(&c->context_list_counter);
38936+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38937+ atomic_inc_unchecked(&c->context_list_counter);
38938
38939- entry->context = atomic_read(&c->context_list_counter);
38940+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38941
38942 list_add(&entry->list, &c->context_list);
38943
38944@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38945
38946 #if BITS_PER_LONG == 64
38947 spin_lock_init(&c->context_list_lock);
38948- atomic_set(&c->context_list_counter, 0);
38949+ atomic_set_unchecked(&c->context_list_counter, 0);
38950 INIT_LIST_HEAD(&c->context_list);
38951 #endif
38952
38953diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
38954index 78e3e85..66c9a0d 100644
38955--- a/drivers/mfd/ab3100-core.c
38956+++ b/drivers/mfd/ab3100-core.c
38957@@ -777,7 +777,7 @@ struct ab_family_id {
38958 char *name;
38959 };
38960
38961-static const struct ab_family_id ids[] __initdata = {
38962+static const struct ab_family_id ids[] __initconst = {
38963 /* AB3100 */
38964 {
38965 .id = 0xc0,
38966diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
38967index 8d8c932..8104515 100644
38968--- a/drivers/mfd/wm8350-i2c.c
38969+++ b/drivers/mfd/wm8350-i2c.c
38970@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
38971 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
38972 int ret;
38973
38974+ pax_track_stack();
38975+
38976 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
38977 return -EINVAL;
38978
38979diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38980index e4ff50b..4cc3f04 100644
38981--- a/drivers/misc/kgdbts.c
38982+++ b/drivers/misc/kgdbts.c
38983@@ -118,7 +118,7 @@
38984 } while (0)
38985 #define MAX_CONFIG_LEN 40
38986
38987-static struct kgdb_io kgdbts_io_ops;
38988+static const struct kgdb_io kgdbts_io_ops;
38989 static char get_buf[BUFMAX];
38990 static int get_buf_cnt;
38991 static char put_buf[BUFMAX];
38992@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
38993 module_put(THIS_MODULE);
38994 }
38995
38996-static struct kgdb_io kgdbts_io_ops = {
38997+static const struct kgdb_io kgdbts_io_ops = {
38998 .name = "kgdbts",
38999 .read_char = kgdbts_get_char,
39000 .write_char = kgdbts_put_char,
39001diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39002index 37e7cfc..67cfb76 100644
39003--- a/drivers/misc/sgi-gru/gruhandles.c
39004+++ b/drivers/misc/sgi-gru/gruhandles.c
39005@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39006
39007 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39008 {
39009- atomic_long_inc(&mcs_op_statistics[op].count);
39010- atomic_long_add(clks, &mcs_op_statistics[op].total);
39011+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39012+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
39013 if (mcs_op_statistics[op].max < clks)
39014 mcs_op_statistics[op].max = clks;
39015 }
39016diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39017index 3f2375c..467c6e6 100644
39018--- a/drivers/misc/sgi-gru/gruprocfs.c
39019+++ b/drivers/misc/sgi-gru/gruprocfs.c
39020@@ -32,9 +32,9 @@
39021
39022 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39023
39024-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39025+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39026 {
39027- unsigned long val = atomic_long_read(v);
39028+ unsigned long val = atomic_long_read_unchecked(v);
39029
39030 if (val)
39031 seq_printf(s, "%16lu %s\n", val, id);
39032@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39033 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
39034
39035 for (op = 0; op < mcsop_last; op++) {
39036- count = atomic_long_read(&mcs_op_statistics[op].count);
39037- total = atomic_long_read(&mcs_op_statistics[op].total);
39038+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39039+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39040 max = mcs_op_statistics[op].max;
39041 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39042 count ? total / count : 0, max);
39043diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39044index 46990bc..4a251b5 100644
39045--- a/drivers/misc/sgi-gru/grutables.h
39046+++ b/drivers/misc/sgi-gru/grutables.h
39047@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
39048 * GRU statistics.
39049 */
39050 struct gru_stats_s {
39051- atomic_long_t vdata_alloc;
39052- atomic_long_t vdata_free;
39053- atomic_long_t gts_alloc;
39054- atomic_long_t gts_free;
39055- atomic_long_t vdata_double_alloc;
39056- atomic_long_t gts_double_allocate;
39057- atomic_long_t assign_context;
39058- atomic_long_t assign_context_failed;
39059- atomic_long_t free_context;
39060- atomic_long_t load_user_context;
39061- atomic_long_t load_kernel_context;
39062- atomic_long_t lock_kernel_context;
39063- atomic_long_t unlock_kernel_context;
39064- atomic_long_t steal_user_context;
39065- atomic_long_t steal_kernel_context;
39066- atomic_long_t steal_context_failed;
39067- atomic_long_t nopfn;
39068- atomic_long_t break_cow;
39069- atomic_long_t asid_new;
39070- atomic_long_t asid_next;
39071- atomic_long_t asid_wrap;
39072- atomic_long_t asid_reuse;
39073- atomic_long_t intr;
39074- atomic_long_t intr_mm_lock_failed;
39075- atomic_long_t call_os;
39076- atomic_long_t call_os_offnode_reference;
39077- atomic_long_t call_os_check_for_bug;
39078- atomic_long_t call_os_wait_queue;
39079- atomic_long_t user_flush_tlb;
39080- atomic_long_t user_unload_context;
39081- atomic_long_t user_exception;
39082- atomic_long_t set_context_option;
39083- atomic_long_t migrate_check;
39084- atomic_long_t migrated_retarget;
39085- atomic_long_t migrated_unload;
39086- atomic_long_t migrated_unload_delay;
39087- atomic_long_t migrated_nopfn_retarget;
39088- atomic_long_t migrated_nopfn_unload;
39089- atomic_long_t tlb_dropin;
39090- atomic_long_t tlb_dropin_fail_no_asid;
39091- atomic_long_t tlb_dropin_fail_upm;
39092- atomic_long_t tlb_dropin_fail_invalid;
39093- atomic_long_t tlb_dropin_fail_range_active;
39094- atomic_long_t tlb_dropin_fail_idle;
39095- atomic_long_t tlb_dropin_fail_fmm;
39096- atomic_long_t tlb_dropin_fail_no_exception;
39097- atomic_long_t tlb_dropin_fail_no_exception_war;
39098- atomic_long_t tfh_stale_on_fault;
39099- atomic_long_t mmu_invalidate_range;
39100- atomic_long_t mmu_invalidate_page;
39101- atomic_long_t mmu_clear_flush_young;
39102- atomic_long_t flush_tlb;
39103- atomic_long_t flush_tlb_gru;
39104- atomic_long_t flush_tlb_gru_tgh;
39105- atomic_long_t flush_tlb_gru_zero_asid;
39106+ atomic_long_unchecked_t vdata_alloc;
39107+ atomic_long_unchecked_t vdata_free;
39108+ atomic_long_unchecked_t gts_alloc;
39109+ atomic_long_unchecked_t gts_free;
39110+ atomic_long_unchecked_t vdata_double_alloc;
39111+ atomic_long_unchecked_t gts_double_allocate;
39112+ atomic_long_unchecked_t assign_context;
39113+ atomic_long_unchecked_t assign_context_failed;
39114+ atomic_long_unchecked_t free_context;
39115+ atomic_long_unchecked_t load_user_context;
39116+ atomic_long_unchecked_t load_kernel_context;
39117+ atomic_long_unchecked_t lock_kernel_context;
39118+ atomic_long_unchecked_t unlock_kernel_context;
39119+ atomic_long_unchecked_t steal_user_context;
39120+ atomic_long_unchecked_t steal_kernel_context;
39121+ atomic_long_unchecked_t steal_context_failed;
39122+ atomic_long_unchecked_t nopfn;
39123+ atomic_long_unchecked_t break_cow;
39124+ atomic_long_unchecked_t asid_new;
39125+ atomic_long_unchecked_t asid_next;
39126+ atomic_long_unchecked_t asid_wrap;
39127+ atomic_long_unchecked_t asid_reuse;
39128+ atomic_long_unchecked_t intr;
39129+ atomic_long_unchecked_t intr_mm_lock_failed;
39130+ atomic_long_unchecked_t call_os;
39131+ atomic_long_unchecked_t call_os_offnode_reference;
39132+ atomic_long_unchecked_t call_os_check_for_bug;
39133+ atomic_long_unchecked_t call_os_wait_queue;
39134+ atomic_long_unchecked_t user_flush_tlb;
39135+ atomic_long_unchecked_t user_unload_context;
39136+ atomic_long_unchecked_t user_exception;
39137+ atomic_long_unchecked_t set_context_option;
39138+ atomic_long_unchecked_t migrate_check;
39139+ atomic_long_unchecked_t migrated_retarget;
39140+ atomic_long_unchecked_t migrated_unload;
39141+ atomic_long_unchecked_t migrated_unload_delay;
39142+ atomic_long_unchecked_t migrated_nopfn_retarget;
39143+ atomic_long_unchecked_t migrated_nopfn_unload;
39144+ atomic_long_unchecked_t tlb_dropin;
39145+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39146+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39147+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39148+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39149+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39150+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39151+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39152+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
39153+ atomic_long_unchecked_t tfh_stale_on_fault;
39154+ atomic_long_unchecked_t mmu_invalidate_range;
39155+ atomic_long_unchecked_t mmu_invalidate_page;
39156+ atomic_long_unchecked_t mmu_clear_flush_young;
39157+ atomic_long_unchecked_t flush_tlb;
39158+ atomic_long_unchecked_t flush_tlb_gru;
39159+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39160+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39161
39162- atomic_long_t copy_gpa;
39163+ atomic_long_unchecked_t copy_gpa;
39164
39165- atomic_long_t mesq_receive;
39166- atomic_long_t mesq_receive_none;
39167- atomic_long_t mesq_send;
39168- atomic_long_t mesq_send_failed;
39169- atomic_long_t mesq_noop;
39170- atomic_long_t mesq_send_unexpected_error;
39171- atomic_long_t mesq_send_lb_overflow;
39172- atomic_long_t mesq_send_qlimit_reached;
39173- atomic_long_t mesq_send_amo_nacked;
39174- atomic_long_t mesq_send_put_nacked;
39175- atomic_long_t mesq_qf_not_full;
39176- atomic_long_t mesq_qf_locked;
39177- atomic_long_t mesq_qf_noop_not_full;
39178- atomic_long_t mesq_qf_switch_head_failed;
39179- atomic_long_t mesq_qf_unexpected_error;
39180- atomic_long_t mesq_noop_unexpected_error;
39181- atomic_long_t mesq_noop_lb_overflow;
39182- atomic_long_t mesq_noop_qlimit_reached;
39183- atomic_long_t mesq_noop_amo_nacked;
39184- atomic_long_t mesq_noop_put_nacked;
39185+ atomic_long_unchecked_t mesq_receive;
39186+ atomic_long_unchecked_t mesq_receive_none;
39187+ atomic_long_unchecked_t mesq_send;
39188+ atomic_long_unchecked_t mesq_send_failed;
39189+ atomic_long_unchecked_t mesq_noop;
39190+ atomic_long_unchecked_t mesq_send_unexpected_error;
39191+ atomic_long_unchecked_t mesq_send_lb_overflow;
39192+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39193+ atomic_long_unchecked_t mesq_send_amo_nacked;
39194+ atomic_long_unchecked_t mesq_send_put_nacked;
39195+ atomic_long_unchecked_t mesq_qf_not_full;
39196+ atomic_long_unchecked_t mesq_qf_locked;
39197+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39198+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39199+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39200+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39201+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39202+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39203+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39204+ atomic_long_unchecked_t mesq_noop_put_nacked;
39205
39206 };
39207
39208@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39209 cchop_deallocate, tghop_invalidate, mcsop_last};
39210
39211 struct mcs_op_statistic {
39212- atomic_long_t count;
39213- atomic_long_t total;
39214+ atomic_long_unchecked_t count;
39215+ atomic_long_unchecked_t total;
39216 unsigned long max;
39217 };
39218
39219@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39220
39221 #define STAT(id) do { \
39222 if (gru_options & OPT_STATS) \
39223- atomic_long_inc(&gru_stats.id); \
39224+ atomic_long_inc_unchecked(&gru_stats.id); \
39225 } while (0)
39226
39227 #ifdef CONFIG_SGI_GRU_DEBUG
39228diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39229index 2275126..12a9dbfb 100644
39230--- a/drivers/misc/sgi-xp/xp.h
39231+++ b/drivers/misc/sgi-xp/xp.h
39232@@ -289,7 +289,7 @@ struct xpc_interface {
39233 xpc_notify_func, void *);
39234 void (*received) (short, int, void *);
39235 enum xp_retval (*partid_to_nasids) (short, void *);
39236-};
39237+} __no_const;
39238
39239 extern struct xpc_interface xpc_interface;
39240
39241diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39242index b94d5f7..7f494c5 100644
39243--- a/drivers/misc/sgi-xp/xpc.h
39244+++ b/drivers/misc/sgi-xp/xpc.h
39245@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39246 void (*received_payload) (struct xpc_channel *, void *);
39247 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39248 };
39249+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39250
39251 /* struct xpc_partition act_state values (for XPC HB) */
39252
39253@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39254 /* found in xpc_main.c */
39255 extern struct device *xpc_part;
39256 extern struct device *xpc_chan;
39257-extern struct xpc_arch_operations xpc_arch_ops;
39258+extern xpc_arch_operations_no_const xpc_arch_ops;
39259 extern int xpc_disengage_timelimit;
39260 extern int xpc_disengage_timedout;
39261 extern int xpc_activate_IRQ_rcvd;
39262diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39263index fd3688a..7e211a4 100644
39264--- a/drivers/misc/sgi-xp/xpc_main.c
39265+++ b/drivers/misc/sgi-xp/xpc_main.c
39266@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
39267 .notifier_call = xpc_system_die,
39268 };
39269
39270-struct xpc_arch_operations xpc_arch_ops;
39271+xpc_arch_operations_no_const xpc_arch_ops;
39272
39273 /*
39274 * Timer function to enforce the timelimit on the partition disengage.
39275diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
39276index 8b70e03..700bda6 100644
39277--- a/drivers/misc/sgi-xp/xpc_sn2.c
39278+++ b/drivers/misc/sgi-xp/xpc_sn2.c
39279@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
39280 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
39281 }
39282
39283-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
39284+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
39285 .setup_partitions = xpc_setup_partitions_sn2,
39286 .teardown_partitions = xpc_teardown_partitions_sn2,
39287 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
39288@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
39289 int ret;
39290 size_t buf_size;
39291
39292- xpc_arch_ops = xpc_arch_ops_sn2;
39293+ pax_open_kernel();
39294+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
39295+ pax_close_kernel();
39296
39297 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
39298 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
39299diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
39300index 8e08d71..7cb8c9b 100644
39301--- a/drivers/misc/sgi-xp/xpc_uv.c
39302+++ b/drivers/misc/sgi-xp/xpc_uv.c
39303@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
39304 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
39305 }
39306
39307-static struct xpc_arch_operations xpc_arch_ops_uv = {
39308+static const struct xpc_arch_operations xpc_arch_ops_uv = {
39309 .setup_partitions = xpc_setup_partitions_uv,
39310 .teardown_partitions = xpc_teardown_partitions_uv,
39311 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
39312@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
39313 int
39314 xpc_init_uv(void)
39315 {
39316- xpc_arch_ops = xpc_arch_ops_uv;
39317+ pax_open_kernel();
39318+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
39319+ pax_close_kernel();
39320
39321 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
39322 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
39323diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
39324index 6fd20b42..650efe3 100644
39325--- a/drivers/mmc/host/sdhci-pci.c
39326+++ b/drivers/mmc/host/sdhci-pci.c
39327@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
39328 .probe = via_probe,
39329 };
39330
39331-static const struct pci_device_id pci_ids[] __devinitdata = {
39332+static const struct pci_device_id pci_ids[] __devinitconst = {
39333 {
39334 .vendor = PCI_VENDOR_ID_RICOH,
39335 .device = PCI_DEVICE_ID_RICOH_R5C822,
39336diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
39337index e7563a9..5f90ce5 100644
39338--- a/drivers/mtd/chips/cfi_cmdset_0001.c
39339+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
39340@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
39341 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
39342 unsigned long timeo = jiffies + HZ;
39343
39344+ pax_track_stack();
39345+
39346 /* Prevent setting state FL_SYNCING for chip in suspended state. */
39347 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
39348 goto sleep;
39349@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
39350 unsigned long initial_adr;
39351 int initial_len = len;
39352
39353+ pax_track_stack();
39354+
39355 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
39356 adr += chip->start;
39357 initial_adr = adr;
39358@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
39359 int retries = 3;
39360 int ret;
39361
39362+ pax_track_stack();
39363+
39364 adr += chip->start;
39365
39366 retry:
39367diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
39368index 0667a67..3ab97ed 100644
39369--- a/drivers/mtd/chips/cfi_cmdset_0020.c
39370+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
39371@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
39372 unsigned long cmd_addr;
39373 struct cfi_private *cfi = map->fldrv_priv;
39374
39375+ pax_track_stack();
39376+
39377 adr += chip->start;
39378
39379 /* Ensure cmd read/writes are aligned. */
39380@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
39381 DECLARE_WAITQUEUE(wait, current);
39382 int wbufsize, z;
39383
39384+ pax_track_stack();
39385+
39386 /* M58LW064A requires bus alignment for buffer wriets -- saw */
39387 if (adr & (map_bankwidth(map)-1))
39388 return -EINVAL;
39389@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
39390 DECLARE_WAITQUEUE(wait, current);
39391 int ret = 0;
39392
39393+ pax_track_stack();
39394+
39395 adr += chip->start;
39396
39397 /* Let's determine this according to the interleave only once */
39398@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
39399 unsigned long timeo = jiffies + HZ;
39400 DECLARE_WAITQUEUE(wait, current);
39401
39402+ pax_track_stack();
39403+
39404 adr += chip->start;
39405
39406 /* Let's determine this according to the interleave only once */
39407@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
39408 unsigned long timeo = jiffies + HZ;
39409 DECLARE_WAITQUEUE(wait, current);
39410
39411+ pax_track_stack();
39412+
39413 adr += chip->start;
39414
39415 /* Let's determine this according to the interleave only once */
39416diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39417index 5bf5f46..c5de373 100644
39418--- a/drivers/mtd/devices/doc2000.c
39419+++ b/drivers/mtd/devices/doc2000.c
39420@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39421
39422 /* The ECC will not be calculated correctly if less than 512 is written */
39423 /* DBB-
39424- if (len != 0x200 && eccbuf)
39425+ if (len != 0x200)
39426 printk(KERN_WARNING
39427 "ECC needs a full sector write (adr: %lx size %lx)\n",
39428 (long) to, (long) len);
39429diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
39430index 0990f78..bb4e8a4 100644
39431--- a/drivers/mtd/devices/doc2001.c
39432+++ b/drivers/mtd/devices/doc2001.c
39433@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
39434 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
39435
39436 /* Don't allow read past end of device */
39437- if (from >= this->totlen)
39438+ if (from >= this->totlen || !len)
39439 return -EINVAL;
39440
39441 /* Don't allow a single read to cross a 512-byte block boundary */
39442diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
39443index e56d6b4..f07e6cf 100644
39444--- a/drivers/mtd/ftl.c
39445+++ b/drivers/mtd/ftl.c
39446@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
39447 loff_t offset;
39448 uint16_t srcunitswap = cpu_to_le16(srcunit);
39449
39450+ pax_track_stack();
39451+
39452 eun = &part->EUNInfo[srcunit];
39453 xfer = &part->XferInfo[xferunit];
39454 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
39455diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
39456index 8aca552..146446e 100755
39457--- a/drivers/mtd/inftlcore.c
39458+++ b/drivers/mtd/inftlcore.c
39459@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
39460 struct inftl_oob oob;
39461 size_t retlen;
39462
39463+ pax_track_stack();
39464+
39465 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
39466 "pending=%d)\n", inftl, thisVUC, pendingblock);
39467
39468diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
39469index 32e82ae..ed50953 100644
39470--- a/drivers/mtd/inftlmount.c
39471+++ b/drivers/mtd/inftlmount.c
39472@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
39473 struct INFTLPartition *ip;
39474 size_t retlen;
39475
39476+ pax_track_stack();
39477+
39478 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
39479
39480 /*
39481diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
39482index 79bf40f..fe5f8fd 100644
39483--- a/drivers/mtd/lpddr/qinfo_probe.c
39484+++ b/drivers/mtd/lpddr/qinfo_probe.c
39485@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
39486 {
39487 map_word pfow_val[4];
39488
39489+ pax_track_stack();
39490+
39491 /* Check identification string */
39492 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
39493 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
39494diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
39495index 726a1b8..f46b460 100644
39496--- a/drivers/mtd/mtdchar.c
39497+++ b/drivers/mtd/mtdchar.c
39498@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
39499 u_long size;
39500 struct mtd_info_user info;
39501
39502+ pax_track_stack();
39503+
39504 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
39505
39506 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
39507diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
39508index 1002e18..26d82d5 100644
39509--- a/drivers/mtd/nftlcore.c
39510+++ b/drivers/mtd/nftlcore.c
39511@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
39512 int inplace = 1;
39513 size_t retlen;
39514
39515+ pax_track_stack();
39516+
39517 memset(BlockMap, 0xff, sizeof(BlockMap));
39518 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
39519
39520diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39521index 8b22b18..6fada85 100644
39522--- a/drivers/mtd/nftlmount.c
39523+++ b/drivers/mtd/nftlmount.c
39524@@ -23,6 +23,7 @@
39525 #include <asm/errno.h>
39526 #include <linux/delay.h>
39527 #include <linux/slab.h>
39528+#include <linux/sched.h>
39529 #include <linux/mtd/mtd.h>
39530 #include <linux/mtd/nand.h>
39531 #include <linux/mtd/nftl.h>
39532@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
39533 struct mtd_info *mtd = nftl->mbd.mtd;
39534 unsigned int i;
39535
39536+ pax_track_stack();
39537+
39538 /* Assume logical EraseSize == physical erasesize for starting the scan.
39539 We'll sort it out later if we find a MediaHeader which says otherwise */
39540 /* Actually, we won't. The new DiskOnChip driver has already scanned
39541diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
39542index 14cec04..09d8519 100644
39543--- a/drivers/mtd/ubi/build.c
39544+++ b/drivers/mtd/ubi/build.c
39545@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
39546 static int __init bytes_str_to_int(const char *str)
39547 {
39548 char *endp;
39549- unsigned long result;
39550+ unsigned long result, scale = 1;
39551
39552 result = simple_strtoul(str, &endp, 0);
39553 if (str == endp || result >= INT_MAX) {
39554@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
39555
39556 switch (*endp) {
39557 case 'G':
39558- result *= 1024;
39559+ scale *= 1024;
39560 case 'M':
39561- result *= 1024;
39562+ scale *= 1024;
39563 case 'K':
39564- result *= 1024;
39565+ scale *= 1024;
39566 if (endp[1] == 'i' && endp[2] == 'B')
39567 endp += 2;
39568 case '\0':
39569@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
39570 return -EINVAL;
39571 }
39572
39573- return result;
39574+ if (result*scale >= INT_MAX) {
39575+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
39576+ str);
39577+ return -EINVAL;
39578+ }
39579+
39580+ return result*scale;
39581 }
39582
39583 /**
39584diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
39585index ab68886..ca405e8 100644
39586--- a/drivers/net/atlx/atl2.c
39587+++ b/drivers/net/atlx/atl2.c
39588@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
39589 */
39590
39591 #define ATL2_PARAM(X, desc) \
39592- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39593+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39594 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
39595 MODULE_PARM_DESC(X, desc);
39596 #else
39597diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
39598index a60cd80..0ed11ef 100644
39599--- a/drivers/net/benet/Makefile
39600+++ b/drivers/net/benet/Makefile
39601@@ -1,7 +1,9 @@
39602 #
39603-# Makefile to build the network driver for ServerEngine's BladeEngine.
39604+# Makefile to build the be2net network driver
39605 #
39606
39607+EXTRA_CFLAGS += -DCONFIG_PALAU
39608+
39609 obj-$(CONFIG_BE2NET) += be2net.o
39610
39611-be2net-y := be_main.o be_cmds.o be_ethtool.o
39612+be2net-y := be_main.o be_cmds.o be_ethtool.o be_compat.o be_misc.o
39613diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
39614index 5c74ff0..7382603 100644
39615--- a/drivers/net/benet/be.h
39616+++ b/drivers/net/benet/be.h
39617@@ -1,18 +1,18 @@
39618 /*
39619- * Copyright (C) 2005 - 2009 ServerEngines
39620+ * Copyright (C) 2005 - 2011 Emulex
39621 * All rights reserved.
39622 *
39623 * This program is free software; you can redistribute it and/or
39624 * modify it under the terms of the GNU General Public License version 2
39625- * as published by the Free Software Foundation. The full GNU General
39626+ * as published by the Free Software Foundation. The full GNU General
39627 * Public License is included in this distribution in the file called COPYING.
39628 *
39629 * Contact Information:
39630- * linux-drivers@serverengines.com
39631+ * linux-drivers@emulex.com
39632 *
39633- * ServerEngines
39634- * 209 N. Fair Oaks Ave
39635- * Sunnyvale, CA 94085
39636+ * Emulex
39637+ * 3333 Susan Street
39638+ * Costa Mesa, CA 92626
39639 */
39640
39641 #ifndef BE_H
39642@@ -29,32 +29,53 @@
39643 #include <linux/workqueue.h>
39644 #include <linux/interrupt.h>
39645 #include <linux/firmware.h>
39646+#include <linux/jhash.h>
39647+#ifndef CONFIG_PALAU
39648+#include <linux/inet_lro.h>
39649+#endif
39650
39651+#ifdef CONFIG_PALAU
39652+#include "be_compat.h"
39653+#endif
39654 #include "be_hw.h"
39655
39656-#define DRV_VER "2.101.205"
39657+#ifdef CONFIG_PALAU
39658+#include "version.h"
39659+#define DRV_VER STR_BE_MAJOR "." STR_BE_MINOR "."\
39660+ STR_BE_BUILD "." STR_BE_BRANCH
39661+#else
39662+#define DRV_VER "2.0.348"
39663+#endif
39664 #define DRV_NAME "be2net"
39665-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39666-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
39667-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
39668-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39669-#define DRV_DESC BE_NAME "Driver"
39670+#define BE_NAME "Emulex BladeEngine2"
39671+#define BE3_NAME "Emulex BladeEngine3"
39672+#define OC_NAME "Emulex OneConnect"
39673+#define OC_NAME_BE OC_NAME "(be3)"
39674+#define OC_NAME_LANCER OC_NAME "(Lancer)"
39675+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
39676
39677-#define BE_VENDOR_ID 0x19a2
39678+#define BE_VENDOR_ID 0x19a2
39679+#define EMULEX_VENDOR_ID 0x10df
39680 #define BE_DEVICE_ID1 0x211
39681 #define BE_DEVICE_ID2 0x221
39682-#define OC_DEVICE_ID1 0x700
39683-#define OC_DEVICE_ID2 0x701
39684-#define OC_DEVICE_ID3 0x710
39685+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
39686+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
39687+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
39688+
39689+#define OC_SUBSYS_DEVICE_ID1 0xE602
39690+#define OC_SUBSYS_DEVICE_ID2 0xE642
39691+#define OC_SUBSYS_DEVICE_ID3 0xE612
39692+#define OC_SUBSYS_DEVICE_ID4 0xE652
39693
39694 static inline char *nic_name(struct pci_dev *pdev)
39695 {
39696 switch (pdev->device) {
39697 case OC_DEVICE_ID1:
39698- case OC_DEVICE_ID2:
39699 return OC_NAME;
39700+ case OC_DEVICE_ID2:
39701+ return OC_NAME_BE;
39702 case OC_DEVICE_ID3:
39703- return OC_NAME1;
39704+ return OC_NAME_LANCER;
39705 case BE_DEVICE_ID2:
39706 return BE3_NAME;
39707 default:
39708@@ -63,7 +84,7 @@ static inline char *nic_name(struct pci_dev *pdev)
39709 }
39710
39711 /* Number of bytes of an RX frame that are copied to skb->data */
39712-#define BE_HDR_LEN 64
39713+#define BE_HDR_LEN ((u16) 64)
39714 #define BE_MAX_JUMBO_FRAME_SIZE 9018
39715 #define BE_MIN_MTU 256
39716
39717@@ -79,10 +100,24 @@ static inline char *nic_name(struct pci_dev *pdev)
39718 #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
39719 #define MCC_CQ_LEN 256
39720
39721+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
39722+
39723+#define MAX_RX_QS (MAX_RSS_QS + 1)
39724+
39725+#ifdef MQ_TX
39726+#define MAX_TX_QS 8
39727+#else
39728+#define MAX_TX_QS 1
39729+#endif
39730+
39731+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RSS qs + 1 def Rx + Tx */
39732 #define BE_NAPI_WEIGHT 64
39733-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39734+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39735 #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
39736
39737+#define BE_MAX_LRO_DESCRIPTORS 16
39738+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
39739+
39740 #define FW_VER_LEN 32
39741
39742 struct be_dma_mem {
39743@@ -127,6 +162,11 @@ static inline void *queue_tail_node(struct be_queue_info *q)
39744 return q->dma_mem.va + q->tail * q->entry_size;
39745 }
39746
39747+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
39748+{
39749+ return q->dma_mem.va + index * q->entry_size;
39750+}
39751+
39752 static inline void queue_head_inc(struct be_queue_info *q)
39753 {
39754 index_inc(&q->head, q->len);
39755@@ -137,6 +177,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
39756 index_inc(&q->tail, q->len);
39757 }
39758
39759+
39760 struct be_eq_obj {
39761 struct be_queue_info q;
39762 char desc[32];
39763@@ -146,6 +187,7 @@ struct be_eq_obj {
39764 u16 min_eqd; /* in usecs */
39765 u16 max_eqd; /* in usecs */
39766 u16 cur_eqd; /* in usecs */
39767+ u8 eq_idx;
39768
39769 struct napi_struct napi;
39770 };
39771@@ -153,49 +195,20 @@ struct be_eq_obj {
39772 struct be_mcc_obj {
39773 struct be_queue_info q;
39774 struct be_queue_info cq;
39775+ bool rearm_cq;
39776 };
39777
39778-struct be_drvr_stats {
39779+struct be_tx_stats {
39780 u32 be_tx_reqs; /* number of TX requests initiated */
39781 u32 be_tx_stops; /* number of times TX Q was stopped */
39782- u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
39783 u32 be_tx_wrbs; /* number of tx WRBs used */
39784- u32 be_tx_events; /* number of tx completion events */
39785 u32 be_tx_compl; /* number of tx completion entries processed */
39786 ulong be_tx_jiffies;
39787 u64 be_tx_bytes;
39788 u64 be_tx_bytes_prev;
39789 u64 be_tx_pkts;
39790 u32 be_tx_rate;
39791-
39792- u32 cache_barrier[16];
39793-
39794- u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
39795- u32 be_polls; /* number of times NAPI called poll function */
39796- u32 be_rx_events; /* number of ucast rx completion events */
39797- u32 be_rx_compl; /* number of rx completion entries processed */
39798- ulong be_rx_jiffies;
39799- u64 be_rx_bytes;
39800- u64 be_rx_bytes_prev;
39801- u64 be_rx_pkts;
39802- u32 be_rx_rate;
39803- /* number of non ether type II frames dropped where
39804- * frame len > length field of Mac Hdr */
39805- u32 be_802_3_dropped_frames;
39806- /* number of non ether type II frames malformed where
39807- * in frame len < length field of Mac Hdr */
39808- u32 be_802_3_malformed_frames;
39809- u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
39810- ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39811- u32 be_rx_frags;
39812- u32 be_prev_rx_frags;
39813- u32 be_rx_fps; /* Rx frags per second */
39814-};
39815-
39816-struct be_stats_obj {
39817- struct be_drvr_stats drvr_stats;
39818- struct net_device_stats net_stats;
39819- struct be_dma_mem cmd;
39820+ u32 be_ipv6_ext_hdr_tx_drop;
39821 };
39822
39823 struct be_tx_obj {
39824@@ -203,23 +216,124 @@ struct be_tx_obj {
39825 struct be_queue_info cq;
39826 /* Remember the skbs that were transmitted */
39827 struct sk_buff *sent_skb_list[TX_Q_LEN];
39828+ struct be_tx_stats stats;
39829 };
39830
39831 /* Struct to remember the pages posted for rx frags */
39832 struct be_rx_page_info {
39833 struct page *page;
39834- dma_addr_t bus;
39835+ DEFINE_DMA_UNMAP_ADDR(bus);
39836 u16 page_offset;
39837 bool last_page_user;
39838 };
39839
39840+struct be_rx_stats {
39841+ u32 rx_post_fail;/* number of ethrx buffer alloc failures */
39842+ u32 rx_polls; /* number of times NAPI called poll function */
39843+ u32 rx_events; /* number of ucast rx completion events */
39844+ u32 rx_compl; /* number of rx completion entries processed */
39845+ ulong rx_jiffies;
39846+ u64 rx_bytes;
39847+ u64 rx_bytes_prev;
39848+ u64 rx_pkts;
39849+ u32 rx_rate;
39850+ u32 rx_mcast_pkts;
39851+ u32 rxcp_err; /* Num rx completion entries w/ err set. */
39852+ ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39853+ u32 rx_frags;
39854+ u32 prev_rx_frags;
39855+ u32 rx_fps; /* Rx frags per second */
39856+ u32 rx_drops_no_frags;
39857+};
39858+
39859+struct be_rx_compl_info {
39860+ u32 rss_hash;
39861+ u16 vlan_tag;
39862+ u16 pkt_size;
39863+ u16 rxq_idx;
39864+ u16 port;
39865+ u8 vlanf;
39866+ u8 num_rcvd;
39867+ u8 err;
39868+ u8 ipf;
39869+ u8 tcpf;
39870+ u8 udpf;
39871+ u8 ip_csum;
39872+ u8 l4_csum;
39873+ u8 ipv6;
39874+ u8 vtm;
39875+ u8 pkt_type;
39876+};
39877+
39878 struct be_rx_obj {
39879+ struct be_adapter *adapter;
39880 struct be_queue_info q;
39881 struct be_queue_info cq;
39882- struct be_rx_page_info page_info_tbl[RX_Q_LEN];
39883+ struct be_rx_compl_info rxcp;
39884+ struct be_rx_page_info *page_info_tbl;
39885+ struct net_lro_mgr lro_mgr;
39886+ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
39887+ struct be_eq_obj rx_eq;
39888+ struct be_rx_stats stats;
39889+ u8 rss_id;
39890+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
39891+ u16 prev_frag_idx;
39892+ u32 cache_line_barrier[16];
39893 };
39894
39895-#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
39896+struct be_drv_stats {
39897+ u32 be_on_die_temperature;
39898+ u32 be_tx_events;
39899+ u32 eth_red_drops;
39900+ u32 rx_drops_no_pbuf;
39901+ u32 rx_drops_no_txpb;
39902+ u32 rx_drops_no_erx_descr;
39903+ u32 rx_drops_no_tpre_descr;
39904+ u32 rx_drops_too_many_frags;
39905+ u32 rx_drops_invalid_ring;
39906+ u32 forwarded_packets;
39907+ u32 rx_drops_mtu;
39908+ u32 rx_crc_errors;
39909+ u32 rx_alignment_symbol_errors;
39910+ u32 rx_pause_frames;
39911+ u32 rx_priority_pause_frames;
39912+ u32 rx_control_frames;
39913+ u32 rx_in_range_errors;
39914+ u32 rx_out_range_errors;
39915+ u32 rx_frame_too_long;
39916+ u32 rx_address_match_errors;
39917+ u32 rx_dropped_too_small;
39918+ u32 rx_dropped_too_short;
39919+ u32 rx_dropped_header_too_small;
39920+ u32 rx_dropped_tcp_length;
39921+ u32 rx_dropped_runt;
39922+ u32 rx_ip_checksum_errs;
39923+ u32 rx_tcp_checksum_errs;
39924+ u32 rx_udp_checksum_errs;
39925+ u32 rx_switched_unicast_packets;
39926+ u32 rx_switched_multicast_packets;
39927+ u32 rx_switched_broadcast_packets;
39928+ u32 tx_pauseframes;
39929+ u32 tx_priority_pauseframes;
39930+ u32 tx_controlframes;
39931+ u32 rxpp_fifo_overflow_drop;
39932+ u32 rx_input_fifo_overflow_drop;
39933+ u32 pmem_fifo_overflow_drop;
39934+ u32 jabber_events;
39935+};
39936+
39937+struct be_vf_cfg {
39938+ unsigned char vf_mac_addr[ETH_ALEN];
39939+ u32 vf_if_handle;
39940+ u32 vf_pmac_id;
39941+ u16 vf_def_vid;
39942+ u16 vf_vlan_tag;
39943+ u32 vf_tx_rate;
39944+};
39945+
39946+#define BE_INVALID_PMAC_ID 0xffffffff
39947+#define BE_FLAGS_DCBX (1 << 16)
39948+
39949 struct be_adapter {
39950 struct pci_dev *pdev;
39951 struct net_device *netdev;
39952@@ -228,7 +342,7 @@ struct be_adapter {
39953 u8 __iomem *db; /* Door Bell */
39954 u8 __iomem *pcicfg; /* PCI config space */
39955
39956- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
39957+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
39958 struct be_dma_mem mbox_mem;
39959 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
39960 * is stored for freeing purpose */
39961@@ -238,66 +352,121 @@ struct be_adapter {
39962 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
39963 spinlock_t mcc_cq_lock;
39964
39965- struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
39966- bool msix_enabled;
39967+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
39968+ u32 num_msix_vec;
39969 bool isr_registered;
39970
39971 /* TX Rings */
39972 struct be_eq_obj tx_eq;
39973- struct be_tx_obj tx_obj;
39974+ struct be_tx_obj tx_obj[MAX_TX_QS];
39975+ u8 num_tx_qs;
39976+ u8 prio_tc_map[MAX_TX_QS]; /* prio_tc_map[prio] => tc-id */
39977+ u8 tc_txq_map[MAX_TX_QS]; /* tc_txq_map[tc-id] => txq index */
39978
39979 u32 cache_line_break[8];
39980
39981 /* Rx rings */
39982- struct be_eq_obj rx_eq;
39983- struct be_rx_obj rx_obj;
39984+ struct be_rx_obj rx_obj[MAX_RX_QS]; /* one default non-rss Q */
39985+ u32 num_rx_qs;
39986+
39987+ struct be_dma_mem stats_cmd;
39988+ struct net_device_stats net_stats;
39989+ struct be_drv_stats drv_stats;
39990 u32 big_page_size; /* Compounded page size shared by rx wrbs */
39991- bool rx_post_starved; /* Zero rx frags have been posted to BE */
39992
39993 struct vlan_group *vlan_grp;
39994- u16 num_vlans;
39995+ u16 vlans_added;
39996+ u16 max_vlans; /* Number of vlans supported */
39997 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
39998+ u8 vlan_prio_bmap; /* Available priority BitMap */
39999+ u16 recommended_prio; /* Recommended Priority */
40000+ struct be_dma_mem rx_filter;
40001
40002- struct be_stats_obj stats;
40003 /* Work queue used to perform periodic tasks like getting statistics */
40004 struct delayed_work work;
40005+ u16 work_counter;
40006
40007- /* Ethtool knobs and info */
40008- bool rx_csum; /* BE card must perform rx-checksumming */
40009+ u32 flags;
40010+ bool rx_csum; /* BE card must perform rx-checksumming */
40011+ u32 max_rx_coal;
40012 char fw_ver[FW_VER_LEN];
40013 u32 if_handle; /* Used to configure filtering */
40014 u32 pmac_id; /* MAC addr handle used by BE card */
40015+ u32 beacon_state; /* for set_phys_id */
40016
40017- bool link_up;
40018+ bool eeh_err;
40019+ int link_status;
40020 u32 port_num;
40021+ u32 hba_port_num;
40022 bool promiscuous;
40023- u32 cap;
40024+ bool wol;
40025+ u32 function_mode;
40026+ u32 function_caps;
40027 u32 rx_fc; /* Rx flow control */
40028 u32 tx_fc; /* Tx flow control */
40029+ bool ue_detected;
40030+ bool stats_cmd_sent;
40031+ bool gro_supported;
40032+ int link_speed;
40033+ u8 port_type;
40034+ u8 transceiver;
40035+ u8 autoneg;
40036 u8 generation; /* BladeEngine ASIC generation */
40037+ u32 flash_status;
40038+ struct completion flash_compl;
40039+
40040+ u8 eq_next_idx;
40041+ bool be3_native;
40042+ u16 num_vfs;
40043+ struct be_vf_cfg *vf_cfg;
40044+ u8 is_virtfn;
40045+ u16 pvid;
40046+ u32 sli_family;
40047+ u8 port_name[4];
40048+ char model_number[32];
40049 };
40050
40051 /* BladeEngine Generation numbers */
40052 #define BE_GEN2 2
40053 #define BE_GEN3 3
40054
40055-extern const struct ethtool_ops be_ethtool_ops;
40056+#define ON 1
40057+#define OFF 0
40058+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
40059+#define lancer_A0_chip(adapter) \
40060+ (adapter->sli_family == LANCER_A0_SLI_FAMILY)
40061
40062-#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
40063+extern struct ethtool_ops be_ethtool_ops;
40064
40065-static inline unsigned int be_pci_func(struct be_adapter *adapter)
40066-{
40067- return PCI_FUNC(adapter->pdev->devfn);
40068-}
40069+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
40070+#define tx_stats(txo) (&txo->stats)
40071+#define rx_stats(rxo) (&rxo->stats)
40072
40073+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
40074+#define BE_SET_NETDEV_OPS(netdev, ops) be_netdev_ops_init(netdev, ops)
40075+#else
40076 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
40077+#endif
40078+
40079+#define for_all_rx_queues(adapter, rxo, i) \
40080+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
40081+ i++, rxo++)
40082+
40083+/* Just skip the first default non-rss queue */
40084+#define for_all_rss_queues(adapter, rxo, i) \
40085+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
40086+ i++, rxo++)
40087+
40088+#define for_all_tx_queues(adapter, txo, i) \
40089+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
40090+ i++, txo++)
40091
40092 #define PAGE_SHIFT_4K 12
40093 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
40094
40095 /* Returns number of pages spanned by the data starting at the given addr */
40096-#define PAGES_4K_SPANNED(_address, size) \
40097- ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40098+#define PAGES_4K_SPANNED(_address, size) \
40099+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40100 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
40101
40102 /* Byte offset into the page corresponding to given address */
40103@@ -305,7 +474,7 @@ static inline unsigned int be_pci_func(struct be_adapter *adapter)
40104 ((size_t)(addr) & (PAGE_SIZE_4K-1))
40105
40106 /* Returns bit offset within a DWORD of a bitfield */
40107-#define AMAP_BIT_OFFSET(_struct, field) \
40108+#define AMAP_BIT_OFFSET(_struct, field) \
40109 (((size_t)&(((_struct *)0)->field))%32)
40110
40111 /* Returns the bit mask of the field that is NOT shifted into location. */
40112@@ -356,6 +525,11 @@ static inline void swap_dws(void *wrb, int len)
40113 #endif /* __BIG_ENDIAN */
40114 }
40115
40116+static inline bool vlan_configured(struct be_adapter *adapter)
40117+{
40118+ return adapter->vlan_grp && adapter->vlans_added;
40119+}
40120+
40121 static inline u8 is_tcp_pkt(struct sk_buff *skb)
40122 {
40123 u8 val = 0;
40124@@ -380,9 +554,65 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
40125 return val;
40126 }
40127
40128+static inline u8 is_ipv6_ext_hdr(struct sk_buff *skb)
40129+{
40130+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40131+ if (ip_hdr(skb)->version == 6)
40132+ return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
40133+ else
40134+#endif
40135+ return 0;
40136+}
40137+
40138+static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
40139+{
40140+ u32 sli_intf;
40141+
40142+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
40143+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
40144+}
40145+
40146+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
40147+{
40148+ u32 addr;
40149+
40150+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
40151+
40152+ mac[5] = (u8)(addr & 0xFF);
40153+ mac[4] = (u8)((addr >> 8) & 0xFF);
40154+ mac[3] = (u8)((addr >> 16) & 0xFF);
40155+ /* Use the OUI programmed in hardware */
40156+ memcpy(mac, adapter->netdev->dev_addr, 3);
40157+}
40158+
40159+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
40160+ struct sk_buff *skb)
40161+{
40162+ u8 vlan_prio = 0;
40163+ u16 vlan_tag = 0;
40164+
40165+ vlan_tag = vlan_tx_tag_get(skb);
40166+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
40167+ /* If vlan priority provided by OS is NOT in available bmap */
40168+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
40169+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
40170+ adapter->recommended_prio;
40171+
40172+ return vlan_tag;
40173+}
40174+
40175+#define be_physfn(adapter) (!adapter->is_virtfn)
40176+
40177 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
40178 u16 num_popped);
40179-extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
40180+extern void be_link_status_update(struct be_adapter *adapter, int link_status);
40181 extern void netdev_stats_update(struct be_adapter *adapter);
40182+extern void be_parse_stats(struct be_adapter *adapter);
40183 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
40184+
40185+#ifdef CONFIG_PALAU
40186+extern void be_sysfs_create_group(struct be_adapter *adapter);
40187+extern void be_sysfs_remove_group(struct be_adapter *adapter);
40188+#endif
40189+
40190 #endif /* BE_H */
40191diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
40192index 28a0eda..b4ca89c 100644
40193--- a/drivers/net/benet/be_cmds.c
40194+++ b/drivers/net/benet/be_cmds.c
40195@@ -1,30 +1,45 @@
40196 /*
40197- * Copyright (C) 2005 - 2009 ServerEngines
40198+ * Copyright (C) 2005 - 2011 Emulex
40199 * All rights reserved.
40200 *
40201 * This program is free software; you can redistribute it and/or
40202 * modify it under the terms of the GNU General Public License version 2
40203- * as published by the Free Software Foundation. The full GNU General
40204+ * as published by the Free Software Foundation. The full GNU General
40205 * Public License is included in this distribution in the file called COPYING.
40206 *
40207 * Contact Information:
40208- * linux-drivers@serverengines.com
40209+ * linux-drivers@emulex.com
40210 *
40211- * ServerEngines
40212- * 209 N. Fair Oaks Ave
40213- * Sunnyvale, CA 94085
40214+ * Emulex
40215+ * 3333 Susan Street
40216+ * Costa Mesa, CA 92626
40217 */
40218
40219 #include "be.h"
40220 #include "be_cmds.h"
40221
40222+/* Must be a power of 2 or else MODULO will BUG_ON */
40223+static int be_get_temp_freq = 64;
40224+
40225+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40226+{
40227+ return wrb->payload.embedded_payload;
40228+}
40229+
40230 static void be_mcc_notify(struct be_adapter *adapter)
40231 {
40232 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40233 u32 val = 0;
40234
40235+ if (adapter->eeh_err) {
40236+ dev_info(&adapter->pdev->dev, "Error in Card Detected! Cannot issue commands\n");
40237+ return;
40238+ }
40239+
40240 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
40241 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
40242+
40243+ wmb();
40244 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40245 }
40246
40247@@ -59,21 +74,67 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
40248
40249 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
40250 CQE_STATUS_COMPL_MASK;
40251+
40252+ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
40253+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
40254+ adapter->flash_status = compl_status;
40255+ complete(&adapter->flash_compl);
40256+ }
40257+
40258 if (compl_status == MCC_STATUS_SUCCESS) {
40259- if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
40260- struct be_cmd_resp_get_stats *resp =
40261- adapter->stats.cmd.va;
40262- be_dws_le_to_cpu(&resp->hw_stats,
40263- sizeof(resp->hw_stats));
40264+ if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
40265+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
40266+ if (adapter->generation == BE_GEN3) {
40267+ struct be_cmd_resp_get_stats_v1 *resp =
40268+ adapter->stats_cmd.va;
40269+
40270+ be_dws_le_to_cpu(&resp->hw_stats,
40271+ sizeof(resp->hw_stats));
40272+ } else {
40273+ struct be_cmd_resp_get_stats_v0 *resp =
40274+ adapter->stats_cmd.va;
40275+
40276+ be_dws_le_to_cpu(&resp->hw_stats,
40277+ sizeof(resp->hw_stats));
40278+ }
40279+ be_parse_stats(adapter);
40280 netdev_stats_update(adapter);
40281+ adapter->stats_cmd_sent = false;
40282+ }
40283+ if (compl->tag0 ==
40284+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
40285+ struct be_mcc_wrb *mcc_wrb =
40286+ queue_index_node(&adapter->mcc_obj.q,
40287+ compl->tag1);
40288+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
40289+ embedded_payload(mcc_wrb);
40290+ adapter->drv_stats.be_on_die_temperature =
40291+ resp->on_die_temperature;
40292+ }
40293+ } else {
40294+ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
40295+ be_get_temp_freq = 0;
40296+
40297+ if (compl->tag1 == MCC_WRB_PASS_THRU)
40298+ goto done;
40299+
40300+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
40301+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
40302+ goto done;
40303+
40304+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
40305+ dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
40306+ "permitted to execute this cmd (opcode %d)\n",
40307+ compl->tag0);
40308+ } else {
40309+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40310+ CQE_STATUS_EXTD_MASK;
40311+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
40312+ "status %d, extd-status %d\n",
40313+ compl->tag0, compl_status, extd_status);
40314 }
40315- } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
40316- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40317- CQE_STATUS_EXTD_MASK;
40318- dev_warn(&adapter->pdev->dev,
40319- "Error in cmd completion: status(compl/extd)=%d/%d\n",
40320- compl_status, extd_status);
40321 }
40322+done:
40323 return compl_status;
40324 }
40325
40326@@ -82,7 +143,70 @@ static void be_async_link_state_process(struct be_adapter *adapter,
40327 struct be_async_event_link_state *evt)
40328 {
40329 be_link_status_update(adapter,
40330- evt->port_link_status == ASYNC_EVENT_LINK_UP);
40331+ ((evt->port_link_status & ~ASYNC_EVENT_LOGICAL) ==
40332+ ASYNC_EVENT_LINK_UP ? LINK_UP : LINK_DOWN));
40333+}
40334+
40335+/* Grp5 CoS Priority evt */
40336+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
40337+ struct be_async_event_grp5_cos_priority *evt)
40338+{
40339+ if (evt->valid) {
40340+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
40341+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
40342+ adapter->recommended_prio =
40343+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
40344+ }
40345+}
40346+
40347+/* Grp5 QOS Speed evt */
40348+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
40349+ struct be_async_event_grp5_qos_link_speed *evt)
40350+{
40351+ if (evt->physical_port == adapter->hba_port_num) {
40352+ /* qos_link_speed is in units of 10 Mbps */
40353+ adapter->link_speed = evt->qos_link_speed * 10;
40354+ }
40355+}
40356+
40357+/*Grp5 PVID evt*/
40358+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
40359+ struct be_async_event_grp5_pvid_state *evt)
40360+{
40361+ if (evt->enabled)
40362+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK ;
40363+ else
40364+ adapter->pvid = 0;
40365+}
40366+
40367+static void be_async_grp5_evt_process(struct be_adapter *adapter,
40368+ u32 trailer, struct be_mcc_compl *evt)
40369+{
40370+ u8 event_type = 0;
40371+
40372+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
40373+ ASYNC_TRAILER_EVENT_TYPE_MASK;
40374+
40375+ switch (event_type) {
40376+ case ASYNC_EVENT_COS_PRIORITY:
40377+ be_async_grp5_cos_priority_process(adapter,
40378+ (struct be_async_event_grp5_cos_priority *)evt);
40379+ break;
40380+ case ASYNC_EVENT_QOS_SPEED:
40381+ be_async_grp5_qos_speed_process(adapter,
40382+ (struct be_async_event_grp5_qos_link_speed *)evt);
40383+ break;
40384+ case ASYNC_EVENT_PVID_STATE:
40385+ be_async_grp5_pvid_state_process(adapter,
40386+ (struct be_async_event_grp5_pvid_state *)evt);
40387+ break;
40388+ case GRP5_TYPE_PRIO_TC_MAP:
40389+ memcpy(adapter->prio_tc_map, evt, MAX_TX_QS);
40390+ break;
40391+ default:
40392+ printk(KERN_WARNING "Unknown grp5 event!\n");
40393+ break;
40394+ }
40395 }
40396
40397 static inline bool is_link_state_evt(u32 trailer)
40398@@ -92,6 +216,13 @@ static inline bool is_link_state_evt(u32 trailer)
40399 ASYNC_EVENT_CODE_LINK_STATE);
40400 }
40401
40402+static inline bool is_grp5_evt(u32 trailer)
40403+{
40404+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
40405+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
40406+ ASYNC_EVENT_CODE_GRP_5);
40407+}
40408+
40409 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40410 {
40411 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
40412@@ -104,46 +235,67 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40413 return NULL;
40414 }
40415
40416-int be_process_mcc(struct be_adapter *adapter)
40417+void be_async_mcc_enable(struct be_adapter *adapter)
40418+{
40419+ spin_lock_bh(&adapter->mcc_cq_lock);
40420+
40421+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
40422+ adapter->mcc_obj.rearm_cq = true;
40423+
40424+ spin_unlock_bh(&adapter->mcc_cq_lock);
40425+}
40426+
40427+void be_async_mcc_disable(struct be_adapter *adapter)
40428+{
40429+ adapter->mcc_obj.rearm_cq = false;
40430+}
40431+
40432+int be_process_mcc(struct be_adapter *adapter, int *status)
40433 {
40434 struct be_mcc_compl *compl;
40435- int num = 0, status = 0;
40436+ int num = 0;
40437+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40438
40439 spin_lock_bh(&adapter->mcc_cq_lock);
40440 while ((compl = be_mcc_compl_get(adapter))) {
40441 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
40442 /* Interpret flags as an async trailer */
40443- BUG_ON(!is_link_state_evt(compl->flags));
40444-
40445- /* Interpret compl as a async link evt */
40446- be_async_link_state_process(adapter,
40447+ if (is_link_state_evt(compl->flags))
40448+ be_async_link_state_process(adapter,
40449 (struct be_async_event_link_state *) compl);
40450+ else if (is_grp5_evt(compl->flags))
40451+ be_async_grp5_evt_process(adapter,
40452+ compl->flags, compl);
40453+
40454 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
40455- status = be_mcc_compl_process(adapter, compl);
40456- atomic_dec(&adapter->mcc_obj.q.used);
40457+ *status = be_mcc_compl_process(adapter, compl);
40458+ atomic_dec(&mcc_obj->q.used);
40459 }
40460 be_mcc_compl_use(compl);
40461 num++;
40462 }
40463
40464- if (num)
40465- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
40466-
40467 spin_unlock_bh(&adapter->mcc_cq_lock);
40468- return status;
40469+ return num;
40470 }
40471
40472 /* Wait till no more pending mcc requests are present */
40473 static int be_mcc_wait_compl(struct be_adapter *adapter)
40474 {
40475 #define mcc_timeout 120000 /* 12s timeout */
40476- int i, status;
40477+ int i, num, status = 0;
40478+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40479+
40480+ if (adapter->eeh_err)
40481+ return -EIO;
40482+
40483 for (i = 0; i < mcc_timeout; i++) {
40484- status = be_process_mcc(adapter);
40485- if (status)
40486- return status;
40487+ num = be_process_mcc(adapter, &status);
40488+ if (num)
40489+ be_cq_notify(adapter, mcc_obj->cq.id,
40490+ mcc_obj->rearm_cq, num);
40491
40492- if (atomic_read(&adapter->mcc_obj.q.used) == 0)
40493+ if (atomic_read(&mcc_obj->q.used) == 0)
40494 break;
40495 udelay(100);
40496 }
40497@@ -151,7 +303,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
40498 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
40499 return -1;
40500 }
40501- return 0;
40502+ return status;
40503 }
40504
40505 /* Notify MCC requests and wait for completion */
40506@@ -163,23 +315,34 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
40507
40508 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
40509 {
40510- int cnt = 0, wait = 5;
40511+ int msecs = 0;
40512 u32 ready;
40513
40514+ if (adapter->eeh_err) {
40515+ dev_err(&adapter->pdev->dev, "Error detected in card.Cannot issue commands\n");
40516+ return -EIO;
40517+ }
40518 do {
40519- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
40520+ ready = ioread32(db);
40521+ if (ready == 0xffffffff) {
40522+ dev_err(&adapter->pdev->dev,
40523+ "pci slot disconnected\n");
40524+ return -1;
40525+ }
40526+
40527+ ready &= MPU_MAILBOX_DB_RDY_MASK;
40528 if (ready)
40529 break;
40530
40531- if (cnt > 4000000) {
40532+ if (msecs > 4000) {
40533 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
40534+ be_detect_dump_ue(adapter);
40535 return -1;
40536 }
40537
40538- if (cnt > 50)
40539- wait = 200;
40540- cnt += wait;
40541- udelay(wait);
40542+ set_current_state(TASK_UNINTERRUPTIBLE);
40543+ schedule_timeout(msecs_to_jiffies(1));
40544+ msecs++;
40545 } while (true);
40546
40547 return 0;
40548@@ -198,6 +361,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40549 struct be_mcc_mailbox *mbox = mbox_mem->va;
40550 struct be_mcc_compl *compl = &mbox->compl;
40551
40552+ /* wait for ready to be set */
40553+ status = be_mbox_db_ready_wait(adapter, db);
40554+ if (status != 0)
40555+ return status;
40556+
40557 val |= MPU_MAILBOX_DB_HI_MASK;
40558 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
40559 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
40560@@ -232,7 +400,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40561
40562 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
40563 {
40564- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40565+ u32 sem;
40566+
40567+ if (lancer_chip(adapter))
40568+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
40569+ else
40570+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40571
40572 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
40573 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
40574@@ -245,30 +418,29 @@ int be_cmd_POST(struct be_adapter *adapter)
40575 {
40576 u16 stage;
40577 int status, timeout = 0;
40578+ struct device *dev = &adapter->pdev->dev;
40579
40580 do {
40581 status = be_POST_stage_get(adapter, &stage);
40582 if (status) {
40583- dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
40584- stage);
40585+ dev_err(dev, "POST error; stage=0x%x\n", stage);
40586 return -1;
40587 } else if (stage != POST_STAGE_ARMFW_RDY) {
40588 set_current_state(TASK_INTERRUPTIBLE);
40589- schedule_timeout(2 * HZ);
40590+ if (schedule_timeout(2 * HZ)) {
40591+ dev_err(dev, "POST cmd aborted\n");
40592+ return -EINTR;
40593+ }
40594 timeout += 2;
40595 } else {
40596 return 0;
40597 }
40598- } while (timeout < 20);
40599+ } while (timeout < 40);
40600
40601- dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
40602+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
40603 return -1;
40604 }
40605
40606-static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40607-{
40608- return wrb->payload.embedded_payload;
40609-}
40610
40611 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40612 {
40613@@ -277,7 +449,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40614
40615 /* Don't touch the hdr after it's prepared */
40616 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40617- bool embedded, u8 sge_cnt)
40618+ bool embedded, u8 sge_cnt, u32 opcode)
40619 {
40620 if (embedded)
40621 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
40622@@ -285,7 +457,8 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40623 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
40624 MCC_WRB_SGE_CNT_SHIFT;
40625 wrb->payload_length = payload_len;
40626- be_dws_cpu_to_le(wrb, 20);
40627+ wrb->tag0 = opcode;
40628+ be_dws_cpu_to_le(wrb, 8);
40629 }
40630
40631 /* Don't touch the hdr after it's prepared */
40632@@ -295,6 +468,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
40633 req_hdr->opcode = opcode;
40634 req_hdr->subsystem = subsystem;
40635 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
40636+ req_hdr->version = 0;
40637 }
40638
40639 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
40640@@ -349,7 +523,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40641 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40642 struct be_mcc_wrb *wrb;
40643
40644- BUG_ON(atomic_read(&mccq->used) >= mccq->len);
40645+ if (atomic_read(&mccq->used) >= mccq->len) {
40646+ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
40647+ return NULL;
40648+ }
40649+
40650 wrb = queue_head_node(mccq);
40651 queue_head_inc(mccq);
40652 atomic_inc(&mccq->used);
40653@@ -357,6 +535,59 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40654 return wrb;
40655 }
40656
40657+/* Tell fw we're about to start firing cmds by writing a
40658+ * special pattern across the wrb hdr; uses mbox
40659+ */
40660+int be_cmd_fw_init(struct be_adapter *adapter)
40661+{
40662+ u8 *wrb;
40663+ int status;
40664+
40665+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40666+ return -1;
40667+
40668+ wrb = (u8 *)wrb_from_mbox(adapter);
40669+ *wrb++ = 0xFF;
40670+ *wrb++ = 0x12;
40671+ *wrb++ = 0x34;
40672+ *wrb++ = 0xFF;
40673+ *wrb++ = 0xFF;
40674+ *wrb++ = 0x56;
40675+ *wrb++ = 0x78;
40676+ *wrb = 0xFF;
40677+
40678+ status = be_mbox_notify_wait(adapter);
40679+
40680+ mutex_unlock(&adapter->mbox_lock);
40681+ return status;
40682+}
40683+
40684+/* Tell fw we're done with firing cmds by writing a
40685+ * special pattern across the wrb hdr; uses mbox
40686+ */
40687+int be_cmd_fw_clean(struct be_adapter *adapter)
40688+{
40689+ u8 *wrb;
40690+ int status;
40691+
40692+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40693+ return -1;
40694+
40695+ wrb = (u8 *)wrb_from_mbox(adapter);
40696+ *wrb++ = 0xFF;
40697+ *wrb++ = 0xAA;
40698+ *wrb++ = 0xBB;
40699+ *wrb++ = 0xFF;
40700+ *wrb++ = 0xFF;
40701+ *wrb++ = 0xCC;
40702+ *wrb++ = 0xDD;
40703+ *wrb = 0xFF;
40704+
40705+ status = be_mbox_notify_wait(adapter);
40706+
40707+ mutex_unlock(&adapter->mbox_lock);
40708+ return status;
40709+}
40710 int be_cmd_eq_create(struct be_adapter *adapter,
40711 struct be_queue_info *eq, int eq_delay)
40712 {
40713@@ -365,20 +596,19 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40714 struct be_dma_mem *q_mem = &eq->dma_mem;
40715 int status;
40716
40717- spin_lock(&adapter->mbox_lock);
40718+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40719+ return -1;
40720
40721 wrb = wrb_from_mbox(adapter);
40722 req = embedded_payload(wrb);
40723
40724- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40725+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
40726
40727 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40728 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
40729
40730 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40731
40732- AMAP_SET_BITS(struct amap_eq_context, func, req->context,
40733- be_pci_func(adapter));
40734 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
40735 /* 4byte eqe*/
40736 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
40737@@ -397,7 +627,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40738 eq->created = true;
40739 }
40740
40741- spin_unlock(&adapter->mbox_lock);
40742+ mutex_unlock(&adapter->mbox_lock);
40743 return status;
40744 }
40745
40746@@ -409,12 +639,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40747 struct be_cmd_req_mac_query *req;
40748 int status;
40749
40750- spin_lock(&adapter->mbox_lock);
40751+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40752+ return -1;
40753
40754 wrb = wrb_from_mbox(adapter);
40755 req = embedded_payload(wrb);
40756
40757- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40758+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40759+ OPCODE_COMMON_NTWK_MAC_QUERY);
40760
40761 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40762 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
40763@@ -433,13 +665,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40764 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
40765 }
40766
40767- spin_unlock(&adapter->mbox_lock);
40768+ mutex_unlock(&adapter->mbox_lock);
40769 return status;
40770 }
40771
40772 /* Uses synchronous MCCQ */
40773 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40774- u32 if_id, u32 *pmac_id)
40775+ u32 if_id, u32 *pmac_id, u32 domain)
40776 {
40777 struct be_mcc_wrb *wrb;
40778 struct be_cmd_req_pmac_add *req;
40779@@ -448,13 +680,19 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40780 spin_lock_bh(&adapter->mcc_lock);
40781
40782 wrb = wrb_from_mccq(adapter);
40783+ if (!wrb) {
40784+ status = -EBUSY;
40785+ goto err;
40786+ }
40787 req = embedded_payload(wrb);
40788
40789- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40790+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40791+ OPCODE_COMMON_NTWK_PMAC_ADD);
40792
40793 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40794 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
40795
40796+ req->hdr.domain = domain;
40797 req->if_id = cpu_to_le32(if_id);
40798 memcpy(req->mac_address, mac_addr, ETH_ALEN);
40799
40800@@ -464,12 +702,13 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40801 *pmac_id = le32_to_cpu(resp->pmac_id);
40802 }
40803
40804+err:
40805 spin_unlock_bh(&adapter->mcc_lock);
40806 return status;
40807 }
40808
40809 /* Uses synchronous MCCQ */
40810-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40811+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
40812 {
40813 struct be_mcc_wrb *wrb;
40814 struct be_cmd_req_pmac_del *req;
40815@@ -478,20 +717,26 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40816 spin_lock_bh(&adapter->mcc_lock);
40817
40818 wrb = wrb_from_mccq(adapter);
40819+ if (!wrb) {
40820+ status = -EBUSY;
40821+ goto err;
40822+ }
40823 req = embedded_payload(wrb);
40824
40825- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40826+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40827+ OPCODE_COMMON_NTWK_PMAC_DEL);
40828
40829 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40830 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
40831
40832+ req->hdr.domain = dom;
40833 req->if_id = cpu_to_le32(if_id);
40834 req->pmac_id = cpu_to_le32(pmac_id);
40835
40836 status = be_mcc_notify_wait(adapter);
40837
40838+err:
40839 spin_unlock_bh(&adapter->mcc_lock);
40840-
40841 return status;
40842 }
40843
40844@@ -506,29 +751,51 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40845 void *ctxt;
40846 int status;
40847
40848- spin_lock(&adapter->mbox_lock);
40849+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40850+ return -1;
40851
40852 wrb = wrb_from_mbox(adapter);
40853 req = embedded_payload(wrb);
40854 ctxt = &req->context;
40855
40856- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40857+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40858+ OPCODE_COMMON_CQ_CREATE);
40859
40860 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40861 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
40862
40863 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40864
40865- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
40866- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
40867- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
40868- __ilog2_u32(cq->len/256));
40869- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
40870- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
40871- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
40872- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
40873- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
40874- AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
40875+ if (lancer_chip(adapter)) {
40876+ req->hdr.version = 2;
40877+ req->page_size = 1; /* 1 for 4K */
40878+ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
40879+ coalesce_wm);
40880+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
40881+ no_delay);
40882+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
40883+ __ilog2_u32(cq->len/256));
40884+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
40885+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
40886+ ctxt, 1);
40887+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
40888+ ctxt, eq->id);
40889+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
40890+ } else {
40891+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
40892+ coalesce_wm);
40893+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
40894+ ctxt, no_delay);
40895+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
40896+ __ilog2_u32(cq->len/256));
40897+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
40898+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
40899+ ctxt, sol_evts);
40900+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
40901+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
40902+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
40903+ }
40904+
40905 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40906
40907 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40908@@ -540,8 +807,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40909 cq->created = true;
40910 }
40911
40912- spin_unlock(&adapter->mbox_lock);
40913-
40914+ mutex_unlock(&adapter->mbox_lock);
40915 return status;
40916 }
40917
40918@@ -553,7 +819,68 @@ static u32 be_encoded_q_len(int q_len)
40919 return len_encoded;
40920 }
40921
40922-int be_cmd_mccq_create(struct be_adapter *adapter,
40923+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
40924+ struct be_queue_info *mccq,
40925+ struct be_queue_info *cq)
40926+{
40927+ struct be_mcc_wrb *wrb;
40928+ struct be_cmd_req_mcc_ext_create *req;
40929+ struct be_dma_mem *q_mem = &mccq->dma_mem;
40930+ void *ctxt;
40931+ int status;
40932+
40933+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40934+ return -1;
40935+
40936+ wrb = wrb_from_mbox(adapter);
40937+ req = embedded_payload(wrb);
40938+ ctxt = &req->context;
40939+
40940+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40941+ OPCODE_COMMON_MCC_CREATE_EXT);
40942+
40943+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40944+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
40945+
40946+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40947+ if (lancer_chip(adapter)) {
40948+ req->hdr.version = 1;
40949+ req->cq_id = cpu_to_le16(cq->id);
40950+
40951+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
40952+ be_encoded_q_len(mccq->len));
40953+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
40954+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
40955+ ctxt, cq->id);
40956+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
40957+ ctxt, 1);
40958+
40959+ } else {
40960+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40961+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40962+ be_encoded_q_len(mccq->len));
40963+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40964+ }
40965+
40966+ /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
40967+ req->async_event_bitmap[0] |= cpu_to_le32(0x00000022);
40968+
40969+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
40970+
40971+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40972+
40973+ status = be_mbox_notify_wait(adapter);
40974+ if (!status) {
40975+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
40976+ mccq->id = le16_to_cpu(resp->id);
40977+ mccq->created = true;
40978+ }
40979+
40980+ mutex_unlock(&adapter->mbox_lock);
40981+ return status;
40982+}
40983+
40984+int be_cmd_mccq_org_create(struct be_adapter *adapter,
40985 struct be_queue_info *mccq,
40986 struct be_queue_info *cq)
40987 {
40988@@ -563,24 +890,25 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
40989 void *ctxt;
40990 int status;
40991
40992- spin_lock(&adapter->mbox_lock);
40993+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40994+ return -1;
40995
40996 wrb = wrb_from_mbox(adapter);
40997 req = embedded_payload(wrb);
40998 ctxt = &req->context;
40999
41000- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41001+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41002+ OPCODE_COMMON_MCC_CREATE);
41003
41004 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41005 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
41006
41007- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41008+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
41009
41010- AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
41011- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
41012- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
41013- be_encoded_q_len(mccq->len));
41014- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
41015+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
41016+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
41017+ be_encoded_q_len(mccq->len));
41018+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
41019
41020 be_dws_cpu_to_le(ctxt, sizeof(req->context));
41021
41022@@ -592,75 +920,93 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
41023 mccq->id = le16_to_cpu(resp->id);
41024 mccq->created = true;
41025 }
41026- spin_unlock(&adapter->mbox_lock);
41027
41028+ mutex_unlock(&adapter->mbox_lock);
41029 return status;
41030 }
41031
41032-int be_cmd_txq_create(struct be_adapter *adapter,
41033- struct be_queue_info *txq,
41034+int be_cmd_mccq_create(struct be_adapter *adapter,
41035+ struct be_queue_info *mccq,
41036 struct be_queue_info *cq)
41037 {
41038+ int status;
41039+
41040+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
41041+ if (status && !lancer_chip(adapter)) {
41042+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
41043+ "or newer to avoid conflicting priorities between NIC "
41044+ "and FCoE traffic");
41045+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
41046+ }
41047+ return status;
41048+}
41049+
41050+int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq,
41051+ struct be_queue_info *cq, u8 *tc_id)
41052+{
41053 struct be_mcc_wrb *wrb;
41054 struct be_cmd_req_eth_tx_create *req;
41055 struct be_dma_mem *q_mem = &txq->dma_mem;
41056- void *ctxt;
41057 int status;
41058
41059- spin_lock(&adapter->mbox_lock);
41060+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41061+ return -1;
41062
41063 wrb = wrb_from_mbox(adapter);
41064 req = embedded_payload(wrb);
41065- ctxt = &req->context;
41066-
41067- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41068
41069+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_TX_CREATE);
41070 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
41071 sizeof(*req));
41072
41073- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41074+ if (adapter->flags & BE_FLAGS_DCBX || lancer_chip(adapter)) {
41075+ req->hdr.version = 1;
41076+ req->if_id = cpu_to_le16(adapter->if_handle);
41077+ }
41078+ if (adapter->flags & BE_FLAGS_DCBX)
41079+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_PRIORITY);
41080+ else
41081+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_STANDARD);
41082 req->ulp_num = BE_ULP1_NUM;
41083- req->type = BE_ETH_TX_RING_TYPE_STANDARD;
41084-
41085- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
41086- be_encoded_q_len(txq->len));
41087- AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
41088- be_pci_func(adapter));
41089- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
41090- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
41091-
41092- be_dws_cpu_to_le(ctxt, sizeof(req->context));
41093-
41094+ req->cq_id = cpu_to_le16(cq->id);
41095+ req->queue_size = be_encoded_q_len(txq->len);
41096+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41097 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
41098
41099 status = be_mbox_notify_wait(adapter);
41100 if (!status) {
41101 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
41102 txq->id = le16_to_cpu(resp->cid);
41103+ if (adapter->flags & BE_FLAGS_DCBX)
41104+ *tc_id = resp->tc_id;
41105 txq->created = true;
41106 }
41107
41108- spin_unlock(&adapter->mbox_lock);
41109-
41110+ mutex_unlock(&adapter->mbox_lock);
41111 return status;
41112 }
41113
41114-/* Uses mbox */
41115+/* Uses MCC */
41116 int be_cmd_rxq_create(struct be_adapter *adapter,
41117 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
41118- u16 max_frame_size, u32 if_id, u32 rss)
41119+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
41120 {
41121 struct be_mcc_wrb *wrb;
41122 struct be_cmd_req_eth_rx_create *req;
41123 struct be_dma_mem *q_mem = &rxq->dma_mem;
41124 int status;
41125
41126- spin_lock(&adapter->mbox_lock);
41127+ spin_lock_bh(&adapter->mcc_lock);
41128
41129- wrb = wrb_from_mbox(adapter);
41130+ wrb = wrb_from_mccq(adapter);
41131+ if (!wrb) {
41132+ status = -EBUSY;
41133+ goto err;
41134+ }
41135 req = embedded_payload(wrb);
41136
41137- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41138+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41139+ OPCODE_ETH_RX_CREATE);
41140
41141 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
41142 sizeof(*req));
41143@@ -673,15 +1019,16 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
41144 req->max_frame_size = cpu_to_le16(max_frame_size);
41145 req->rss_queue = cpu_to_le32(rss);
41146
41147- status = be_mbox_notify_wait(adapter);
41148+ status = be_mcc_notify_wait(adapter);
41149 if (!status) {
41150 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
41151 rxq->id = le16_to_cpu(resp->id);
41152 rxq->created = true;
41153+ *rss_id = resp->rss_id;
41154 }
41155
41156- spin_unlock(&adapter->mbox_lock);
41157-
41158+err:
41159+ spin_unlock_bh(&adapter->mcc_lock);
41160 return status;
41161 }
41162
41163@@ -696,13 +1043,12 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41164 u8 subsys = 0, opcode = 0;
41165 int status;
41166
41167- spin_lock(&adapter->mbox_lock);
41168+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41169+ return -1;
41170
41171 wrb = wrb_from_mbox(adapter);
41172 req = embedded_payload(wrb);
41173
41174- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41175-
41176 switch (queue_type) {
41177 case QTYPE_EQ:
41178 subsys = CMD_SUBSYSTEM_COMMON;
41179@@ -727,13 +1073,47 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41180 default:
41181 BUG();
41182 }
41183+
41184+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
41185+
41186 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
41187 req->id = cpu_to_le16(q->id);
41188
41189 status = be_mbox_notify_wait(adapter);
41190+ if (!status)
41191+ q->created = false;
41192
41193- spin_unlock(&adapter->mbox_lock);
41194+ mutex_unlock(&adapter->mbox_lock);
41195+ return status;
41196+}
41197
41198+/* Uses MCC */
41199+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
41200+{
41201+ struct be_mcc_wrb *wrb;
41202+ struct be_cmd_req_q_destroy *req;
41203+ int status;
41204+
41205+ spin_lock_bh(&adapter->mcc_lock);
41206+
41207+ wrb = wrb_from_mccq(adapter);
41208+ if (!wrb) {
41209+ status = -EBUSY;
41210+ goto err;
41211+ }
41212+ req = embedded_payload(wrb);
41213+
41214+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
41215+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
41216+ sizeof(*req));
41217+ req->id = cpu_to_le16(q->id);
41218+
41219+ status = be_mcc_notify_wait(adapter);
41220+ if (!status)
41221+ q->created = false;
41222+
41223+err:
41224+ spin_unlock_bh(&adapter->mcc_lock);
41225 return status;
41226 }
41227
41228@@ -741,22 +1121,26 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41229 * Uses mbox
41230 */
41231 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41232- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
41233+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
41234+ u32 domain)
41235 {
41236 struct be_mcc_wrb *wrb;
41237 struct be_cmd_req_if_create *req;
41238 int status;
41239
41240- spin_lock(&adapter->mbox_lock);
41241+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41242+ return -1;
41243
41244 wrb = wrb_from_mbox(adapter);
41245 req = embedded_payload(wrb);
41246
41247- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41248+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41249+ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
41250
41251 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41252 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
41253
41254+ req->hdr.domain = domain;
41255 req->capability_flags = cpu_to_le32(cap_flags);
41256 req->enable_flags = cpu_to_le32(en_flags);
41257 req->pmac_invalid = pmac_invalid;
41258@@ -771,33 +1155,35 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41259 *pmac_id = le32_to_cpu(resp->pmac_id);
41260 }
41261
41262- spin_unlock(&adapter->mbox_lock);
41263+ mutex_unlock(&adapter->mbox_lock);
41264 return status;
41265 }
41266
41267 /* Uses mbox */
41268-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41269+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
41270 {
41271 struct be_mcc_wrb *wrb;
41272 struct be_cmd_req_if_destroy *req;
41273 int status;
41274
41275- spin_lock(&adapter->mbox_lock);
41276+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41277+ return -1;
41278
41279 wrb = wrb_from_mbox(adapter);
41280 req = embedded_payload(wrb);
41281
41282- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41283+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41284+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
41285
41286 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41287 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
41288
41289+ req->hdr.domain = domain;
41290 req->interface_id = cpu_to_le32(interface_id);
41291
41292 status = be_mbox_notify_wait(adapter);
41293
41294- spin_unlock(&adapter->mbox_lock);
41295-
41296+ mutex_unlock(&adapter->mbox_lock);
41297 return status;
41298 }
41299
41300@@ -808,33 +1194,48 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41301 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
41302 {
41303 struct be_mcc_wrb *wrb;
41304- struct be_cmd_req_get_stats *req;
41305+ struct be_cmd_req_hdr *hdr;
41306 struct be_sge *sge;
41307+ int status = 0;
41308+
41309+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
41310+ be_cmd_get_die_temperature(adapter);
41311
41312 spin_lock_bh(&adapter->mcc_lock);
41313
41314 wrb = wrb_from_mccq(adapter);
41315- req = nonemb_cmd->va;
41316+ if (!wrb) {
41317+ status = -EBUSY;
41318+ goto err;
41319+ }
41320+ hdr = nonemb_cmd->va;
41321 sge = nonembedded_sgl(wrb);
41322
41323- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
41324- wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
41325+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
41326+ OPCODE_ETH_GET_STATISTICS);
41327
41328- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41329- OPCODE_ETH_GET_STATISTICS, sizeof(*req));
41330+ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
41331+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
41332+
41333+ if (adapter->generation == BE_GEN3)
41334+ hdr->version = 1;
41335+
41336+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
41337 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
41338 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
41339 sge->len = cpu_to_le32(nonemb_cmd->size);
41340
41341 be_mcc_notify(adapter);
41342+ adapter->stats_cmd_sent = true;
41343
41344+err:
41345 spin_unlock_bh(&adapter->mcc_lock);
41346- return 0;
41347+ return status;
41348 }
41349
41350 /* Uses synchronous mcc */
41351 int be_cmd_link_status_query(struct be_adapter *adapter,
41352- bool *link_up)
41353+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom)
41354 {
41355 struct be_mcc_wrb *wrb;
41356 struct be_cmd_req_link_status *req;
41357@@ -843,50 +1244,216 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
41358 spin_lock_bh(&adapter->mcc_lock);
41359
41360 wrb = wrb_from_mccq(adapter);
41361+ if (!wrb) {
41362+ status = -EBUSY;
41363+ goto err;
41364+ }
41365 req = embedded_payload(wrb);
41366
41367- *link_up = false;
41368+ *link_status = LINK_DOWN;
41369
41370- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41371+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41372+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
41373
41374 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41375 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
41376
41377+ req->hdr.domain = dom;
41378+
41379 status = be_mcc_notify_wait(adapter);
41380 if (!status) {
41381 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
41382- if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
41383- *link_up = true;
41384+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
41385+ *link_status = LINK_UP;
41386+ *link_speed = le16_to_cpu(resp->link_speed);
41387+ *mac_speed = resp->mac_speed;
41388+ }
41389 }
41390
41391+err:
41392 spin_unlock_bh(&adapter->mcc_lock);
41393 return status;
41394 }
41395
41396-/* Uses Mbox */
41397-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
41398+/* Uses synchronous mcc */
41399+int be_cmd_get_die_temperature(struct be_adapter *adapter)
41400+{
41401+ struct be_mcc_wrb *wrb;
41402+ struct be_cmd_req_get_cntl_addnl_attribs *req;
41403+ u16 mccq_index;
41404+ int status;
41405+
41406+ spin_lock_bh(&adapter->mcc_lock);
41407+
41408+ mccq_index = adapter->mcc_obj.q.head;
41409+
41410+ wrb = wrb_from_mccq(adapter);
41411+ if (!wrb) {
41412+ status = -EBUSY;
41413+ goto err;
41414+ }
41415+ req = embedded_payload(wrb);
41416+
41417+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41418+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
41419+
41420+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41421+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
41422+
41423+ wrb->tag1 = mccq_index;
41424+
41425+ be_mcc_notify(adapter);
41426+
41427+err:
41428+ spin_unlock_bh(&adapter->mcc_lock);
41429+ return status;
41430+}
41431+
41432+
41433+/* Uses synchronous mcc */
41434+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
41435+{
41436+ struct be_mcc_wrb *wrb;
41437+ struct be_cmd_req_get_fat *req;
41438+ int status;
41439+
41440+ spin_lock_bh(&adapter->mcc_lock);
41441+
41442+ wrb = wrb_from_mccq(adapter);
41443+ if (!wrb) {
41444+ status = -EBUSY;
41445+ goto err;
41446+ }
41447+ req = embedded_payload(wrb);
41448+
41449+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41450+ OPCODE_COMMON_MANAGE_FAT);
41451+
41452+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41453+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
41454+ req->fat_operation = cpu_to_le32(QUERY_FAT);
41455+ status = be_mcc_notify_wait(adapter);
41456+ if (!status) {
41457+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
41458+ if (log_size && resp->log_size)
41459+ *log_size = le32_to_cpu(resp->log_size) -
41460+ sizeof(u32);
41461+ }
41462+err:
41463+ spin_unlock_bh(&adapter->mcc_lock);
41464+ return status;
41465+}
41466+
41467+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
41468+{
41469+ struct be_dma_mem get_fat_cmd;
41470+ struct be_mcc_wrb *wrb;
41471+ struct be_cmd_req_get_fat *req;
41472+ struct be_sge *sge;
41473+ u32 offset = 0, total_size, buf_size,
41474+ log_offset = sizeof(u32), payload_len;
41475+ int status;
41476+
41477+ if (buf_len == 0)
41478+ return;
41479+
41480+ total_size = buf_len;
41481+
41482+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
41483+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
41484+ get_fat_cmd.size,
41485+ &get_fat_cmd.dma);
41486+ if (!get_fat_cmd.va) {
41487+ status = -ENOMEM;
41488+ dev_err(&adapter->pdev->dev,
41489+ "Memory allocation failure while retrieving FAT data\n");
41490+ return;
41491+ }
41492+
41493+ spin_lock_bh(&adapter->mcc_lock);
41494+
41495+ while (total_size) {
41496+ buf_size = min(total_size, (u32)60*1024);
41497+ total_size -= buf_size;
41498+
41499+ wrb = wrb_from_mccq(adapter);
41500+ if (!wrb) {
41501+ status = -EBUSY;
41502+ goto err;
41503+ }
41504+ req = get_fat_cmd.va;
41505+ sge = nonembedded_sgl(wrb);
41506+
41507+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
41508+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
41509+ OPCODE_COMMON_MANAGE_FAT);
41510+
41511+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41512+ OPCODE_COMMON_MANAGE_FAT, payload_len);
41513+
41514+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
41515+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
41516+ sge->len = cpu_to_le32(get_fat_cmd.size);
41517+
41518+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
41519+ req->read_log_offset = cpu_to_le32(log_offset);
41520+ req->read_log_length = cpu_to_le32(buf_size);
41521+ req->data_buffer_size = cpu_to_le32(buf_size);
41522+
41523+ status = be_mcc_notify_wait(adapter);
41524+ if (!status) {
41525+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
41526+ memcpy(buf + offset,
41527+ resp->data_buffer,
41528+ le32_to_cpu(resp->read_log_length));
41529+ } else {
41530+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
41531+ goto err;
41532+ }
41533+ offset += buf_size;
41534+ log_offset += buf_size;
41535+ }
41536+err:
41537+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
41538+ get_fat_cmd.va,
41539+ get_fat_cmd.dma);
41540+ spin_unlock_bh(&adapter->mcc_lock);
41541+}
41542+
41543+/* Uses synchronous mcc */
41544+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
41545+ char *fw_on_flash)
41546 {
41547 struct be_mcc_wrb *wrb;
41548 struct be_cmd_req_get_fw_version *req;
41549 int status;
41550
41551- spin_lock(&adapter->mbox_lock);
41552+ spin_lock_bh(&adapter->mcc_lock);
41553+
41554+ wrb = wrb_from_mccq(adapter);
41555+ if (!wrb) {
41556+ status = -EBUSY;
41557+ goto err;
41558+ }
41559
41560- wrb = wrb_from_mbox(adapter);
41561 req = embedded_payload(wrb);
41562
41563- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41564+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41565+ OPCODE_COMMON_GET_FW_VERSION);
41566
41567 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41568 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
41569
41570- status = be_mbox_notify_wait(adapter);
41571+ status = be_mcc_notify_wait(adapter);
41572 if (!status) {
41573 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
41574- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
41575+ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN-1);
41576+ if (fw_on_flash)
41577+ strncpy(fw_on_flash, resp->fw_on_flash_version_string,
41578+ FW_VER_LEN-1);
41579 }
41580-
41581- spin_unlock(&adapter->mbox_lock);
41582+err:
41583+ spin_unlock_bh(&adapter->mcc_lock);
41584 return status;
41585 }
41586
41587@@ -897,13 +1464,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41588 {
41589 struct be_mcc_wrb *wrb;
41590 struct be_cmd_req_modify_eq_delay *req;
41591+ int status = 0;
41592
41593 spin_lock_bh(&adapter->mcc_lock);
41594
41595 wrb = wrb_from_mccq(adapter);
41596+ if (!wrb) {
41597+ status = -EBUSY;
41598+ goto err;
41599+ }
41600 req = embedded_payload(wrb);
41601
41602- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41603+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41604+ OPCODE_COMMON_MODIFY_EQ_DELAY);
41605
41606 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41607 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
41608@@ -915,8 +1488,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41609
41610 be_mcc_notify(adapter);
41611
41612+err:
41613 spin_unlock_bh(&adapter->mcc_lock);
41614- return 0;
41615+ return status;
41616 }
41617
41618 /* Uses sycnhronous mcc */
41619@@ -930,9 +1504,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41620 spin_lock_bh(&adapter->mcc_lock);
41621
41622 wrb = wrb_from_mccq(adapter);
41623+ if (!wrb) {
41624+ status = -EBUSY;
41625+ goto err;
41626+ }
41627 req = embedded_payload(wrb);
41628
41629- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41630+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41631+ OPCODE_COMMON_NTWK_VLAN_CONFIG);
41632
41633 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41634 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
41635@@ -948,79 +1527,63 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41636
41637 status = be_mcc_notify_wait(adapter);
41638
41639+err:
41640 spin_unlock_bh(&adapter->mcc_lock);
41641 return status;
41642 }
41643
41644-/* Uses MCC for this command as it may be called in BH context
41645- * Uses synchronous mcc
41646- */
41647-int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
41648+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
41649 {
41650 struct be_mcc_wrb *wrb;
41651- struct be_cmd_req_promiscuous_config *req;
41652+ struct be_dma_mem *mem = &adapter->rx_filter;
41653+ struct be_cmd_req_rx_filter *req = mem->va;
41654+ struct be_sge *sge;
41655 int status;
41656
41657 spin_lock_bh(&adapter->mcc_lock);
41658
41659 wrb = wrb_from_mccq(adapter);
41660- req = embedded_payload(wrb);
41661-
41662- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41663-
41664- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41665- OPCODE_ETH_PROMISCUOUS, sizeof(*req));
41666-
41667- if (port_num)
41668- req->port1_promiscuous = en;
41669- else
41670- req->port0_promiscuous = en;
41671-
41672- status = be_mcc_notify_wait(adapter);
41673-
41674- spin_unlock_bh(&adapter->mcc_lock);
41675- return status;
41676-}
41677-
41678-/*
41679- * Uses MCC for this command as it may be called in BH context
41680- * (mc == NULL) => multicast promiscous
41681- */
41682-int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
41683- struct dev_mc_list *mc_list, u32 mc_count)
41684-{
41685-#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
41686- struct be_mcc_wrb *wrb;
41687- struct be_cmd_req_mcast_mac_config *req;
41688-
41689- spin_lock_bh(&adapter->mcc_lock);
41690-
41691- wrb = wrb_from_mccq(adapter);
41692- req = embedded_payload(wrb);
41693-
41694- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41695-
41696+ if (!wrb) {
41697+ status = -EBUSY;
41698+ goto err;
41699+ }
41700+ sge = nonembedded_sgl(wrb);
41701+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
41702+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
41703+ sge->len = cpu_to_le32(mem->size);
41704+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41705+ OPCODE_COMMON_NTWK_RX_FILTER);
41706+
41707+ memset(req, 0, sizeof(*req));
41708 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41709- OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
41710+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
41711
41712- req->interface_id = if_id;
41713- if (mc_list && mc_count <= BE_MAX_MC) {
41714- int i;
41715- struct dev_mc_list *mc;
41716-
41717- req->num_mac = cpu_to_le16(mc_count);
41718-
41719- for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
41720- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
41721+ req->if_id = cpu_to_le32(adapter->if_handle);
41722+ if (flags & IFF_PROMISC) {
41723+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41724+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41725+ if (value == ON)
41726+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41727+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41728+ } else if (flags & IFF_ALLMULTI) {
41729+ req->if_flags_mask = req->if_flags =
41730+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
41731 } else {
41732- req->promiscuous = 1;
41733- }
41734+ struct netdev_hw_addr *ha;
41735+ int i = 0;
41736
41737- be_mcc_notify_wait(adapter);
41738+ req->if_flags_mask = req->if_flags =
41739+ cpu_to_le32(BE_IF_FLAGS_MULTICAST);
41740+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
41741+ netdev_for_each_mc_addr(ha, adapter->netdev)
41742+ memcpy(req->mcast_mac[i++].byte, ha->DMI_ADDR,
41743+ ETH_ALEN);
41744+ }
41745+ status = be_mcc_notify_wait(adapter);
41746
41747+err:
41748 spin_unlock_bh(&adapter->mcc_lock);
41749-
41750- return 0;
41751+ return status;
41752 }
41753
41754 /* Uses synchrounous mcc */
41755@@ -1033,9 +1596,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41756 spin_lock_bh(&adapter->mcc_lock);
41757
41758 wrb = wrb_from_mccq(adapter);
41759+ if (!wrb) {
41760+ status = -EBUSY;
41761+ goto err;
41762+ }
41763 req = embedded_payload(wrb);
41764
41765- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41766+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41767+ OPCODE_COMMON_SET_FLOW_CONTROL);
41768
41769 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41770 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
41771@@ -1045,6 +1613,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41772
41773 status = be_mcc_notify_wait(adapter);
41774
41775+err:
41776 spin_unlock_bh(&adapter->mcc_lock);
41777 return status;
41778 }
41779@@ -1059,9 +1628,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41780 spin_lock_bh(&adapter->mcc_lock);
41781
41782 wrb = wrb_from_mccq(adapter);
41783+ if (!wrb) {
41784+ status = -EBUSY;
41785+ goto err;
41786+ }
41787 req = embedded_payload(wrb);
41788
41789- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41790+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41791+ OPCODE_COMMON_GET_FLOW_CONTROL);
41792
41793 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41794 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
41795@@ -1074,23 +1648,27 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41796 *rx_fc = le16_to_cpu(resp->rx_flow_control);
41797 }
41798
41799+err:
41800 spin_unlock_bh(&adapter->mcc_lock);
41801 return status;
41802 }
41803
41804 /* Uses mbox */
41805-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41806+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
41807+ u32 *mode, u32 *function_caps)
41808 {
41809 struct be_mcc_wrb *wrb;
41810 struct be_cmd_req_query_fw_cfg *req;
41811 int status;
41812
41813- spin_lock(&adapter->mbox_lock);
41814+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41815+ return -1;
41816
41817 wrb = wrb_from_mbox(adapter);
41818 req = embedded_payload(wrb);
41819
41820- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41821+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41822+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
41823
41824 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41825 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
41826@@ -1099,10 +1677,11 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41827 if (!status) {
41828 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
41829 *port_num = le32_to_cpu(resp->phys_port);
41830- *cap = le32_to_cpu(resp->function_cap);
41831+ *mode = le32_to_cpu(resp->function_mode);
41832+ *function_caps = le32_to_cpu(resp->function_caps);
41833 }
41834
41835- spin_unlock(&adapter->mbox_lock);
41836+ mutex_unlock(&adapter->mbox_lock);
41837 return status;
41838 }
41839
41840@@ -1113,19 +1692,161 @@ int be_cmd_reset_function(struct be_adapter *adapter)
41841 struct be_cmd_req_hdr *req;
41842 int status;
41843
41844- spin_lock(&adapter->mbox_lock);
41845+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41846+ return -1;
41847
41848 wrb = wrb_from_mbox(adapter);
41849 req = embedded_payload(wrb);
41850
41851- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41852+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41853+ OPCODE_COMMON_FUNCTION_RESET);
41854
41855 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
41856 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
41857
41858 status = be_mbox_notify_wait(adapter);
41859
41860- spin_unlock(&adapter->mbox_lock);
41861+ mutex_unlock(&adapter->mbox_lock);
41862+ return status;
41863+}
41864+
41865+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
41866+{
41867+ struct be_mcc_wrb *wrb;
41868+ struct be_cmd_req_rss_config *req;
41869+ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
41870+ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
41871+ int status;
41872+
41873+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41874+ return -1;
41875+
41876+ wrb = wrb_from_mbox(adapter);
41877+ req = embedded_payload(wrb);
41878+
41879+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41880+ OPCODE_ETH_RSS_CONFIG);
41881+
41882+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41883+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
41884+
41885+ req->if_id = cpu_to_le32(adapter->if_handle);
41886+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
41887+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
41888+ memcpy(req->cpu_table, rsstable, table_size);
41889+ memcpy(req->hash, myhash, sizeof(myhash));
41890+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
41891+
41892+ status = be_mbox_notify_wait(adapter);
41893+
41894+ mutex_unlock(&adapter->mbox_lock);
41895+ return status;
41896+}
41897+
41898+/* Uses sync mcc */
41899+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
41900+ u8 bcn, u8 sts, u8 state)
41901+{
41902+ struct be_mcc_wrb *wrb;
41903+ struct be_cmd_req_enable_disable_beacon *req;
41904+ int status;
41905+
41906+ spin_lock_bh(&adapter->mcc_lock);
41907+
41908+ wrb = wrb_from_mccq(adapter);
41909+ if (!wrb) {
41910+ status = -EBUSY;
41911+ goto err;
41912+ }
41913+ req = embedded_payload(wrb);
41914+
41915+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41916+ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
41917+
41918+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41919+ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
41920+
41921+ req->port_num = port_num;
41922+ req->beacon_state = state;
41923+ req->beacon_duration = bcn;
41924+ req->status_duration = sts;
41925+
41926+ status = be_mcc_notify_wait(adapter);
41927+
41928+err:
41929+ spin_unlock_bh(&adapter->mcc_lock);
41930+ return status;
41931+}
41932+
41933+/* Uses sync mcc */
41934+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
41935+{
41936+ struct be_mcc_wrb *wrb;
41937+ struct be_cmd_req_get_beacon_state *req;
41938+ int status;
41939+
41940+ spin_lock_bh(&adapter->mcc_lock);
41941+
41942+ wrb = wrb_from_mccq(adapter);
41943+ if (!wrb) {
41944+ status = -EBUSY;
41945+ goto err;
41946+ }
41947+ req = embedded_payload(wrb);
41948+
41949+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41950+ OPCODE_COMMON_GET_BEACON_STATE);
41951+
41952+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41953+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
41954+
41955+ req->port_num = port_num;
41956+
41957+ status = be_mcc_notify_wait(adapter);
41958+ if (!status) {
41959+ struct be_cmd_resp_get_beacon_state *resp =
41960+ embedded_payload(wrb);
41961+ *state = resp->beacon_state;
41962+ }
41963+
41964+err:
41965+ spin_unlock_bh(&adapter->mcc_lock);
41966+ return status;
41967+}
41968+
41969+/* Uses sync mcc */
41970+int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
41971+ u8 *connector)
41972+{
41973+ struct be_mcc_wrb *wrb;
41974+ struct be_cmd_req_port_type *req;
41975+ int status;
41976+
41977+ spin_lock_bh(&adapter->mcc_lock);
41978+
41979+ wrb = wrb_from_mccq(adapter);
41980+ if (!wrb) {
41981+ status = -EBUSY;
41982+ goto err;
41983+ }
41984+ req = embedded_payload(wrb);
41985+
41986+ be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
41987+ OPCODE_COMMON_READ_TRANSRECV_DATA);
41988+
41989+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41990+ OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
41991+
41992+ req->port = cpu_to_le32(port);
41993+ req->page_num = cpu_to_le32(TR_PAGE_A0);
41994+ status = be_mcc_notify_wait(adapter);
41995+ if (!status) {
41996+ struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
41997+ *connector = resp->data.connector;
41998+ }
41999+
42000+err:
42001+ spin_unlock_bh(&adapter->mcc_lock);
42002 return status;
42003 }
42004
42005@@ -1133,16 +1854,24 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
42006 u32 flash_type, u32 flash_opcode, u32 buf_size)
42007 {
42008 struct be_mcc_wrb *wrb;
42009- struct be_cmd_write_flashrom *req = cmd->va;
42010+ struct be_cmd_write_flashrom *req;
42011 struct be_sge *sge;
42012 int status;
42013
42014 spin_lock_bh(&adapter->mcc_lock);
42015+ adapter->flash_status = 0;
42016
42017 wrb = wrb_from_mccq(adapter);
42018+ if (!wrb) {
42019+ status = -EBUSY;
42020+ goto err_unlock;
42021+ }
42022+ req = cmd->va;
42023 sge = nonembedded_sgl(wrb);
42024
42025- be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
42026+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42027+ OPCODE_COMMON_WRITE_FLASHROM);
42028+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
42029
42030 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42031 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
42032@@ -1154,8 +1883,852 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
42033 req->params.op_code = cpu_to_le32(flash_opcode);
42034 req->params.data_buf_size = cpu_to_le32(buf_size);
42035
42036+ be_mcc_notify(adapter);
42037+ spin_unlock_bh(&adapter->mcc_lock);
42038+
42039+ if (!wait_for_completion_timeout(&adapter->flash_compl,
42040+ msecs_to_jiffies(40000)))
42041+ status = -1;
42042+ else
42043+ status = adapter->flash_status;
42044+
42045+ return status;
42046+
42047+err_unlock:
42048+ spin_unlock_bh(&adapter->mcc_lock);
42049+ return status;
42050+}
42051+
42052+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
42053+ int offset)
42054+{
42055+ struct be_mcc_wrb *wrb;
42056+ struct be_cmd_write_flashrom *req;
42057+ int status;
42058+
42059+ spin_lock_bh(&adapter->mcc_lock);
42060+
42061+ wrb = wrb_from_mccq(adapter);
42062+ if (!wrb) {
42063+ status = -EBUSY;
42064+ goto err;
42065+ }
42066+ req = embedded_payload(wrb);
42067+
42068+ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
42069+ OPCODE_COMMON_READ_FLASHROM);
42070+
42071+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42072+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
42073+
42074+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
42075+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
42076+ req->params.offset = cpu_to_le32(offset);
42077+ req->params.data_buf_size = cpu_to_le32(0x4);
42078+
42079+ status = be_mcc_notify_wait(adapter);
42080+ if (!status)
42081+ memcpy(flashed_crc, req->params.data_buf, 4);
42082+
42083+err:
42084+ spin_unlock_bh(&adapter->mcc_lock);
42085+ return status;
42086+}
42087+
42088+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
42089+ struct be_dma_mem *nonemb_cmd)
42090+{
42091+ struct be_mcc_wrb *wrb;
42092+ struct be_cmd_req_acpi_wol_magic_config *req;
42093+ struct be_sge *sge;
42094+ int status;
42095+
42096+ spin_lock_bh(&adapter->mcc_lock);
42097+
42098+ wrb = wrb_from_mccq(adapter);
42099+ if (!wrb) {
42100+ status = -EBUSY;
42101+ goto err;
42102+ }
42103+ req = nonemb_cmd->va;
42104+ sge = nonembedded_sgl(wrb);
42105+
42106+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42107+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
42108+
42109+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42110+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
42111+ memcpy(req->magic_mac, mac, ETH_ALEN);
42112+
42113+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42114+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42115+ sge->len = cpu_to_le32(nonemb_cmd->size);
42116+
42117+ status = be_mcc_notify_wait(adapter);
42118+
42119+err:
42120+ spin_unlock_bh(&adapter->mcc_lock);
42121+ return status;
42122+}
42123+
42124+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
42125+ u8 loopback_type, u8 enable)
42126+{
42127+ struct be_mcc_wrb *wrb;
42128+ struct be_cmd_req_set_lmode *req;
42129+ int status;
42130+
42131+ spin_lock_bh(&adapter->mcc_lock);
42132+
42133+ wrb = wrb_from_mccq(adapter);
42134+ if (!wrb) {
42135+ status = -EBUSY;
42136+ goto err;
42137+ }
42138+
42139+ req = embedded_payload(wrb);
42140+
42141+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42142+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
42143+
42144+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42145+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
42146+ sizeof(*req));
42147+
42148+ req->src_port = port_num;
42149+ req->dest_port = port_num;
42150+ req->loopback_type = loopback_type;
42151+ req->loopback_state = enable;
42152+
42153+ status = be_mcc_notify_wait(adapter);
42154+err:
42155+ spin_unlock_bh(&adapter->mcc_lock);
42156+ return status;
42157+}
42158+
42159+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
42160+ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
42161+{
42162+ struct be_mcc_wrb *wrb;
42163+ struct be_cmd_req_loopback_test *req;
42164+ int status;
42165+
42166+ spin_lock_bh(&adapter->mcc_lock);
42167+
42168+ wrb = wrb_from_mccq(adapter);
42169+ if (!wrb) {
42170+ status = -EBUSY;
42171+ goto err;
42172+ }
42173+
42174+ req = embedded_payload(wrb);
42175+
42176+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42177+ OPCODE_LOWLEVEL_LOOPBACK_TEST);
42178+
42179+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42180+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
42181+ req->hdr.timeout = cpu_to_le32(4);
42182+
42183+ req->pattern = cpu_to_le64(pattern);
42184+ req->src_port = cpu_to_le32(port_num);
42185+ req->dest_port = cpu_to_le32(port_num);
42186+ req->pkt_size = cpu_to_le32(pkt_size);
42187+ req->num_pkts = cpu_to_le32(num_pkts);
42188+ req->loopback_type = cpu_to_le32(loopback_type);
42189+
42190+ status = be_mcc_notify_wait(adapter);
42191+ if (!status) {
42192+ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
42193+ status = le32_to_cpu(resp->status);
42194+ }
42195+
42196+err:
42197+ spin_unlock_bh(&adapter->mcc_lock);
42198+ return status;
42199+}
42200+
42201+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
42202+ u32 byte_cnt, struct be_dma_mem *cmd)
42203+{
42204+ struct be_mcc_wrb *wrb;
42205+ struct be_cmd_req_ddrdma_test *req;
42206+ struct be_sge *sge;
42207+ int status;
42208+ int i, j = 0;
42209+
42210+ spin_lock_bh(&adapter->mcc_lock);
42211+
42212+ wrb = wrb_from_mccq(adapter);
42213+ if (!wrb) {
42214+ status = -EBUSY;
42215+ goto err;
42216+ }
42217+ req = cmd->va;
42218+ sge = nonembedded_sgl(wrb);
42219+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42220+ OPCODE_LOWLEVEL_HOST_DDR_DMA);
42221+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42222+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
42223+
42224+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
42225+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
42226+ sge->len = cpu_to_le32(cmd->size);
42227+
42228+ req->pattern = cpu_to_le64(pattern);
42229+ req->byte_count = cpu_to_le32(byte_cnt);
42230+ for (i = 0; i < byte_cnt; i++) {
42231+ req->snd_buff[i] = (u8)(pattern >> (j*8));
42232+ j++;
42233+ if (j > 7)
42234+ j = 0;
42235+ }
42236+
42237+ status = be_mcc_notify_wait(adapter);
42238+
42239+ if (!status) {
42240+ struct be_cmd_resp_ddrdma_test *resp;
42241+ resp = cmd->va;
42242+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
42243+ resp->snd_err) {
42244+ status = -1;
42245+ }
42246+ }
42247+
42248+err:
42249+ spin_unlock_bh(&adapter->mcc_lock);
42250+ return status;
42251+}
42252+
42253+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
42254+ struct be_dma_mem *nonemb_cmd)
42255+{
42256+ struct be_mcc_wrb *wrb;
42257+ struct be_cmd_req_seeprom_read *req;
42258+ struct be_sge *sge;
42259+ int status;
42260+
42261+ spin_lock_bh(&adapter->mcc_lock);
42262+
42263+ wrb = wrb_from_mccq(adapter);
42264+ req = nonemb_cmd->va;
42265+ sge = nonembedded_sgl(wrb);
42266+
42267+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42268+ OPCODE_COMMON_SEEPROM_READ);
42269+
42270+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42271+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
42272+
42273+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42274+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42275+ sge->len = cpu_to_le32(nonemb_cmd->size);
42276+
42277+ status = be_mcc_notify_wait(adapter);
42278+
42279+ spin_unlock_bh(&adapter->mcc_lock);
42280+ return status;
42281+}
42282+
42283+int be_cmd_get_phy_info(struct be_adapter *adapter,
42284+ struct be_phy_info *phy_info)
42285+{
42286+ struct be_mcc_wrb *wrb;
42287+ struct be_cmd_req_get_phy_info *req;
42288+ struct be_sge *sge;
42289+ struct be_dma_mem cmd;
42290+ struct be_phy_info *resp_phy_info;
42291+ int status;
42292+
42293+ spin_lock_bh(&adapter->mcc_lock);
42294+ wrb = wrb_from_mccq(adapter);
42295+ if (!wrb) {
42296+ status = -EBUSY;
42297+ goto err;
42298+ }
42299+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
42300+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
42301+ &cmd.dma);
42302+ if (!cmd.va) {
42303+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
42304+ status = -ENOMEM;
42305+ goto err;
42306+ }
42307+
42308+ req = cmd.va;
42309+ sge = nonembedded_sgl(wrb);
42310+
42311+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42312+ OPCODE_COMMON_GET_PHY_DETAILS);
42313+
42314+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42315+ OPCODE_COMMON_GET_PHY_DETAILS,
42316+ sizeof(*req));
42317+
42318+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
42319+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
42320+ sge->len = cpu_to_le32(cmd.size);
42321+
42322+ status = be_mcc_notify_wait(adapter);
42323+ if (!status) {
42324+ resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr);
42325+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
42326+ phy_info->interface_type =
42327+ le16_to_cpu(resp_phy_info->interface_type);
42328+ phy_info->auto_speeds_supported =
42329+ le16_to_cpu(resp_phy_info->auto_speeds_supported);
42330+ phy_info->fixed_speeds_supported =
42331+ le16_to_cpu(resp_phy_info->fixed_speeds_supported);
42332+ phy_info->misc_params =
42333+ le32_to_cpu(resp_phy_info->misc_params);
42334+ }
42335+ pci_free_consistent(adapter->pdev, cmd.size,
42336+ cmd.va, cmd.dma);
42337+err:
42338+ spin_unlock_bh(&adapter->mcc_lock);
42339+ return status;
42340+}
42341+
42342+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
42343+{
42344+ struct be_mcc_wrb *wrb;
42345+ struct be_cmd_req_set_qos *req;
42346+ int status;
42347+
42348+ spin_lock_bh(&adapter->mcc_lock);
42349+
42350+ wrb = wrb_from_mccq(adapter);
42351+ if (!wrb) {
42352+ status = -EBUSY;
42353+ goto err;
42354+ }
42355+
42356+ req = embedded_payload(wrb);
42357+
42358+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42359+ OPCODE_COMMON_SET_QOS);
42360+
42361+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42362+ OPCODE_COMMON_SET_QOS, sizeof(*req));
42363+
42364+ req->hdr.domain = domain;
42365+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
42366+ req->max_bps_nic = cpu_to_le32(bps);
42367+
42368+ status = be_mcc_notify_wait(adapter);
42369+err:
42370+ spin_unlock_bh(&adapter->mcc_lock);
42371+ return status;
42372+}
42373+
42374+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
42375+{
42376+ struct be_mcc_wrb *wrb;
42377+ struct be_cmd_req_cntl_attribs *req;
42378+ struct be_cmd_resp_cntl_attribs *resp;
42379+ struct be_sge *sge;
42380+ int status;
42381+ int payload_len = max(sizeof(*req), sizeof(*resp));
42382+ struct mgmt_controller_attrib *attribs;
42383+ struct be_dma_mem attribs_cmd;
42384+
42385+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
42386+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
42387+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
42388+ &attribs_cmd.dma);
42389+ if (!attribs_cmd.va) {
42390+ dev_err(&adapter->pdev->dev,
42391+ "Memory allocation failure\n");
42392+ return -ENOMEM;
42393+ }
42394+
42395+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42396+ return -1;
42397+
42398+ wrb = wrb_from_mbox(adapter);
42399+ if (!wrb) {
42400+ status = -EBUSY;
42401+ goto err;
42402+ }
42403+ req = attribs_cmd.va;
42404+ sge = nonembedded_sgl(wrb);
42405+
42406+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
42407+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
42408+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42409+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
42410+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
42411+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
42412+ sge->len = cpu_to_le32(attribs_cmd.size);
42413+
42414+ status = be_mbox_notify_wait(adapter);
42415+ if (!status) {
42416+ attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
42417+ sizeof(struct be_cmd_resp_hdr));
42418+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
42419+ strncpy(adapter->model_number,
42420+ attribs->hba_attribs.controller_model_number, 31);
42421+ }
42422+
42423+err:
42424+ mutex_unlock(&adapter->mbox_lock);
42425+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
42426+ attribs_cmd.dma);
42427+ return status;
42428+}
42429+
42430+/* Uses mbox */
42431+int be_cmd_req_native_mode(struct be_adapter *adapter)
42432+{
42433+ struct be_mcc_wrb *wrb;
42434+ struct be_cmd_req_set_func_cap *req;
42435+ int status;
42436+
42437+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42438+ return -1;
42439+
42440+ wrb = wrb_from_mbox(adapter);
42441+ if (!wrb) {
42442+ status = -EBUSY;
42443+ goto err;
42444+ }
42445+
42446+ req = embedded_payload(wrb);
42447+
42448+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42449+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
42450+
42451+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42452+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
42453+
42454+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
42455+ CAPABILITY_BE3_NATIVE_ERX_API);
42456+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
42457+
42458+ status = be_mbox_notify_wait(adapter);
42459+ if (!status) {
42460+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
42461+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
42462+ CAPABILITY_BE3_NATIVE_ERX_API;
42463+ }
42464+err:
42465+ mutex_unlock(&adapter->mbox_lock);
42466+ return status;
42467+}
42468+
42469+static void encode_port_names(struct be_adapter *adapter)
42470+{
42471+ switch (adapter->port_name[adapter->hba_port_num]) {
42472+ case '0':
42473+ adapter->port_name[adapter->hba_port_num] = 0;
42474+ break;
42475+ case '1':
42476+ adapter->port_name[adapter->hba_port_num] = 1;
42477+ break;
42478+ case '2':
42479+ adapter->port_name[adapter->hba_port_num] = 2;
42480+ break;
42481+ case '3':
42482+ adapter->port_name[adapter->hba_port_num] = 3;
42483+ break;
42484+ case '4':
42485+ adapter->port_name[adapter->hba_port_num] = 4;
42486+ break;
42487+ case 'A':
42488+ adapter->port_name[adapter->hba_port_num] = 5;
42489+ break;
42490+ case 'B':
42491+ adapter->port_name[adapter->hba_port_num] = 6;
42492+ break;
42493+ case 'C':
42494+ adapter->port_name[adapter->hba_port_num] = 7;
42495+ break;
42496+ case 'D':
42497+ adapter->port_name[adapter->hba_port_num] = 8;
42498+ break;
42499+ }
42500+}
42501+
42502+int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name)
42503+{
42504+ struct be_mcc_wrb *wrb;
42505+ struct be_cmd_req_get_port_name *req;
42506+ int status;
42507+
42508+ spin_lock_bh(&adapter->mcc_lock);
42509+
42510+ wrb = wrb_from_mccq(adapter);
42511+ if (!wrb) {
42512+ status = -EBUSY;
42513+ goto err;
42514+ }
42515+
42516+ req = embedded_payload(wrb);
42517+
42518+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42519+ OPCODE_COMMON_GET_PORT_NAME);
42520+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42521+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42522+
42523+ status = be_mcc_notify_wait(adapter);
42524+ if (!status) {
42525+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
42526+ port_name[0] = resp->port0_name;
42527+ port_name[1] = resp->port1_name;
42528+ }
42529+
42530+err:
42531+ spin_unlock_bh(&adapter->mcc_lock);
42532+
42533+ if(!status)
42534+ encode_port_names(adapter);
42535+ return status;
42536+}
42537+
42538+int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name)
42539+{
42540+ struct be_mcc_wrb *wrb;
42541+ struct be_cmd_req_get_port_name *req;
42542+ int status;
42543+
42544+ spin_lock_bh(&adapter->mcc_lock);
42545+
42546+ wrb = wrb_from_mccq(adapter);
42547+ if (!wrb) {
42548+ status = -EBUSY;
42549+ goto err;
42550+ }
42551+ req = embedded_payload(wrb);
42552+
42553+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42554+ OPCODE_COMMON_GET_PORT_NAME);
42555+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42556+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42557+ req->hdr.version = 1;
42558+
42559 status = be_mcc_notify_wait(adapter);
42560+ if (!status) {
42561+ struct be_cmd_resp_get_port_name_v1 *resp = embedded_payload(wrb);
42562+ port_name[0] = resp->port0_name;
42563+ port_name[1] = resp->port1_name;
42564+ port_name[2] = resp->port2_name;
42565+ port_name[3] = resp->port3_name;
42566+ }
42567+
42568+err:
42569+ spin_unlock_bh(&adapter->mcc_lock);
42570+
42571+ if (!status)
42572+ encode_port_names(adapter);
42573+ return status;
42574+}
42575+
42576+int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs)
42577+{
42578+ struct be_mcc_wrb *wrb;
42579+ struct be_cmd_req_pg *req;
42580+ int status, num = 0;
42581+ bool query = true;
42582+
42583+ *fw_num_txqs = MAX_TX_QS;
42584+
42585+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42586+ return -1;
42587+
42588+enable_pfc:
42589+ wrb = wrb_from_mbox(adapter);
42590+ req = embedded_payload(wrb);
42591+
42592+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42593+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST);
42594+
42595+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42596+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST, sizeof(*req));
42597+
42598+ if (query)
42599+ req->query |= cpu_to_le32(REQ_PG_QUERY);
42600+ req->pfc_pg |= cpu_to_le32(REQ_PG_FEAT);
42601+
42602+ status = be_mbox_notify_wait(adapter);
42603+ if (!status) {
42604+ struct be_cmd_resp_pg *resp = embedded_payload(wrb);
42605+ if (query) {
42606+ if (le32_to_cpu(resp->pfc_pg) & REQ_PG_FEAT) {
42607+ num = le32_to_cpu(resp->num_tx_rings);
42608+ query = false;
42609+ goto enable_pfc;
42610+ }
42611+ } else {
42612+ adapter->flags |= BE_FLAGS_DCBX;
42613+ *fw_num_txqs = num;
42614+ }
42615+ }
42616+
42617+ mutex_unlock(&adapter->mbox_lock);
42618+ return status;
42619+}
42620+
42621+/* Set privilege(s) for a function */
42622+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 mask, u32 *prev,
42623+ u32 domain)
42624+{
42625+ struct be_mcc_wrb *wrb;
42626+ struct be_cmd_req_set_fn_privileges *req;
42627+ int status;
42628+
42629+ spin_lock_bh(&adapter->mcc_lock);
42630+
42631+ wrb = wrb_from_mccq(adapter);
42632+ if (!wrb) {
42633+ status = -EBUSY;
42634+ goto err;
42635+ }
42636+
42637+ req = embedded_payload(wrb);
42638+
42639+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42640+ OPCODE_COMMON_SET_FN_PRIVILEGES);
42641+
42642+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42643+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req));
42644+
42645+ req->hdr.domain = domain;
42646+ req->privilege_mask = cpu_to_le32(mask);
42647+
42648+ status = be_mcc_notify_wait(adapter);
42649+
42650+err:
42651+ spin_unlock_bh(&adapter->mcc_lock);
42652+ return status;
42653+}
42654+
42655+/* Get privilege(s) for a function */
42656+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
42657+ u32 domain)
42658+{
42659+ struct be_mcc_wrb *wrb;
42660+ struct be_cmd_req_get_fn_privileges *req;
42661+ int status;
42662+
42663+ spin_lock_bh(&adapter->mcc_lock);
42664+
42665+ wrb = wrb_from_mccq(adapter);
42666+ if (!wrb) {
42667+ status = -EBUSY;
42668+ goto err;
42669+ }
42670+
42671+ req = embedded_payload(wrb);
42672+
42673+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42674+ OPCODE_COMMON_GET_FN_PRIVILEGES);
42675
42676+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42677+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req));
42678+
42679+ req->hdr.domain = domain;
42680+
42681+ status = be_mcc_notify_wait(adapter);
42682+ if (!status) {
42683+ struct be_cmd_resp_get_fn_privileges *resp =
42684+ embedded_payload(wrb);
42685+ *privilege = le32_to_cpu(resp->privilege_mask);
42686+ } else
42687+ *privilege = 0;
42688+
42689+err:
42690+ spin_unlock_bh(&adapter->mcc_lock);
42691+ return status;
42692+}
42693+
42694+/* Set Hyper switch config */
42695+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
42696+ u32 domain, u16 intf_id)
42697+{
42698+ struct be_mcc_wrb *wrb;
42699+ struct be_cmd_req_set_hsw_config *req;
42700+ void *ctxt;
42701+ int status;
42702+
42703+ spin_lock_bh(&adapter->mcc_lock);
42704+
42705+ wrb = wrb_from_mccq(adapter);
42706+ if (!wrb) {
42707+ status = -EBUSY;
42708+ goto err;
42709+ }
42710+
42711+ req = embedded_payload(wrb);
42712+ ctxt = &req->context;
42713+
42714+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42715+ OPCODE_COMMON_SET_HSW_CONFIG);
42716+
42717+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42718+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req));
42719+
42720+ req->hdr.domain = domain;
42721+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
42722+ if (pvid) {
42723+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
42724+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
42725+ }
42726+
42727+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42728+ status = be_mcc_notify_wait(adapter);
42729+
42730+err:
42731+ spin_unlock_bh(&adapter->mcc_lock);
42732+ return status;
42733+}
42734+
42735+/* Get Hyper switch config */
42736+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
42737+ u32 domain, u16 intf_id)
42738+{
42739+ struct be_mcc_wrb *wrb;
42740+ struct be_cmd_req_get_hsw_config *req;
42741+ void *ctxt;
42742+ int status;
42743+ u16 vid;
42744+
42745+ spin_lock_bh(&adapter->mcc_lock);
42746+
42747+ wrb = wrb_from_mccq(adapter);
42748+ if (!wrb) {
42749+ status = -EBUSY;
42750+ goto err;
42751+ }
42752+
42753+ req = embedded_payload(wrb);
42754+ ctxt = &req->context;
42755+
42756+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42757+ OPCODE_COMMON_GET_HSW_CONFIG);
42758+
42759+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42760+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req));
42761+
42762+ req->hdr.domain = domain;
42763+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
42764+ intf_id);
42765+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
42766+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42767+
42768+ status = be_mcc_notify_wait(adapter);
42769+ if (!status) {
42770+ struct be_cmd_resp_get_hsw_config *resp =
42771+ embedded_payload(wrb);
42772+ be_dws_le_to_cpu(&resp->context,
42773+ sizeof(resp->context));
42774+ vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
42775+ pvid, &resp->context);
42776+ *pvid = le16_to_cpu(vid);
42777+ }
42778+
42779+err:
42780+ spin_unlock_bh(&adapter->mcc_lock);
42781+ return status;
42782+}
42783+
42784+int be_cmd_get_port_speed(struct be_adapter *adapter,
42785+ u8 port_num, u16 *dac_cable_len, u16 *port_speed)
42786+{
42787+ struct be_mcc_wrb *wrb;
42788+ struct be_cmd_req_get_port_speed *req;
42789+ int status = 0;
42790+
42791+ spin_lock_bh(&adapter->mcc_lock);
42792+
42793+ wrb = wrb_from_mccq(adapter);
42794+ if (!wrb) {
42795+ status = -EBUSY;
42796+ goto err;
42797+ }
42798+
42799+ req = embedded_payload(wrb);
42800+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42801+ OPCODE_COMMON_NTWK_GET_LINK_SPEED);
42802+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42803+ OPCODE_COMMON_NTWK_GET_LINK_SPEED,
42804+ sizeof(*req));
42805+ req->port_num = port_num;
42806+ status = be_mcc_notify_wait(adapter);
42807+ if (!status) {
42808+ struct be_cmd_resp_get_port_speed *resp =
42809+ embedded_payload(wrb);
42810+ *dac_cable_len = resp->dac_cable_length;
42811+ *port_speed = resp->mac_speed;
42812+ }
42813+
42814+err:
42815+ spin_unlock_bh(&adapter->mcc_lock);
42816+ return status;
42817+}
42818+
42819+int be_cmd_set_port_speed_v1(struct be_adapter *adapter,
42820+ u8 port_num, u16 mac_speed,
42821+ u16 dac_cable_len)
42822+{
42823+ struct be_mcc_wrb *wrb;
42824+ struct be_cmd_req_set_port_speed_v1 *req;
42825+ int status = 0;
42826+
42827+ spin_lock_bh(&adapter->mcc_lock);
42828+
42829+ wrb = wrb_from_mccq(adapter);
42830+ if (!wrb) {
42831+ status = -EBUSY;
42832+ goto err;
42833+ }
42834+ req = embedded_payload(wrb);
42835+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42836+ OPCODE_COMMON_NTWK_SET_LINK_SPEED);
42837+
42838+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42839+ OPCODE_COMMON_NTWK_SET_LINK_SPEED,
42840+ sizeof(*req));
42841+ req->hdr.version=1;
42842+
42843+ req->port_num = port_num;
42844+ req->virt_port = port_num;
42845+ req->mac_speed = mac_speed;
42846+ req->dac_cable_length = dac_cable_len;
42847+ status = be_mcc_notify_wait(adapter);
42848+err:
42849+ spin_unlock_bh(&adapter->mcc_lock);
42850+ return status;
42851+}
42852+
42853+
42854+/* Uses sync mcc */
42855+#ifdef CONFIG_PALAU
42856+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
42857+ int req_size, void *va)
42858+{
42859+ struct be_mcc_wrb *wrb;
42860+ struct be_sge *sge;
42861+ int status;
42862+ struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) va;
42863+
42864+ spin_lock_bh(&adapter->mcc_lock);
42865+
42866+ wrb = wrb_from_mccq(adapter);
42867+ if (!wrb) {
42868+ status = -EBUSY;
42869+ goto err;
42870+ }
42871+ sge = nonembedded_sgl(wrb);
42872+
42873+ be_wrb_hdr_prepare(wrb, req_size, false, 1, hdr->opcode);
42874+ wrb->tag1 = MCC_WRB_PASS_THRU;
42875+ sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
42876+ sge->pa_lo = cpu_to_le32(dma & 0xFFFFFFFF);
42877+ sge->len = cpu_to_le32(req_size);
42878+
42879+ status = be_mcc_notify_wait(adapter);
42880+err:
42881 spin_unlock_bh(&adapter->mcc_lock);
42882 return status;
42883 }
42884+#endif
42885diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
42886index ad33d55..35aa5c7 100644
42887--- a/drivers/net/benet/be_cmds.h
42888+++ b/drivers/net/benet/be_cmds.h
42889@@ -1,20 +1,23 @@
42890 /*
42891- * Copyright (C) 2005 - 2009 ServerEngines
42892+ * Copyright (C) 2005 - 2011 Emulex
42893 * All rights reserved.
42894 *
42895 * This program is free software; you can redistribute it and/or
42896 * modify it under the terms of the GNU General Public License version 2
42897- * as published by the Free Software Foundation. The full GNU General
42898+ * as published by the Free Software Foundation. The full GNU General
42899 * Public License is included in this distribution in the file called COPYING.
42900 *
42901 * Contact Information:
42902- * linux-drivers@serverengines.com
42903+ * linux-drivers@emulex.com
42904 *
42905- * ServerEngines
42906- * 209 N. Fair Oaks Ave
42907- * Sunnyvale, CA 94085
42908+ * Emulex
42909+ * 3333 Susan Street
42910+ * Costa Mesa, CA 92626
42911 */
42912
42913+#ifndef BE_CMDS_H
42914+#define BE_CMDS_H
42915+
42916 /*
42917 * The driver sends configuration and managements command requests to the
42918 * firmware in the BE. These requests are communicated to the processor
42919@@ -29,9 +32,10 @@ struct be_sge {
42920 u32 len;
42921 };
42922
42923-#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42924+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42925 #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
42926 #define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
42927+#define MCC_WRB_PASS_THRU 0xFF /* this wrb is used for pass thru cmd */
42928 struct be_mcc_wrb {
42929 u32 embedded; /* dword 0 */
42930 u32 payload_length; /* dword 1 */
42931@@ -44,24 +48,19 @@ struct be_mcc_wrb {
42932 } payload;
42933 };
42934
42935-#define CQE_FLAGS_VALID_MASK (1 << 31)
42936-#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42937-#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42938-#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42939+#define CQE_FLAGS_VALID_MASK (1 << 31)
42940+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42941+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42942+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42943
42944 /* Completion Status */
42945 enum {
42946- MCC_STATUS_SUCCESS = 0x0,
42947-/* The client does not have sufficient privileges to execute the command */
42948- MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
42949-/* A parameter in the command was invalid. */
42950- MCC_STATUS_INVALID_PARAMETER = 0x2,
42951-/* There are insufficient chip resources to execute the command */
42952- MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
42953-/* The command is completing because the queue was getting flushed */
42954- MCC_STATUS_QUEUE_FLUSHING = 0x4,
42955-/* The command is completing with a DMA error */
42956- MCC_STATUS_DMA_FAILED = 0x5,
42957+ MCC_STATUS_SUCCESS = 0,
42958+ MCC_STATUS_FAILED = 1,
42959+ MCC_STATUS_ILLEGAL_REQUEST = 2,
42960+ MCC_STATUS_ILLEGAL_FIELD = 3,
42961+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
42962+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
42963 MCC_STATUS_NOT_SUPPORTED = 66
42964 };
42965
42966@@ -81,15 +80,24 @@ struct be_mcc_compl {
42967 * mcc_compl is interpreted as follows:
42968 */
42969 #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
42970+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
42971 #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
42972+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
42973 #define ASYNC_EVENT_CODE_LINK_STATE 0x1
42974+#define ASYNC_EVENT_CODE_GRP_5 0x5
42975+#define ASYNC_EVENT_QOS_SPEED 0x1
42976+#define ASYNC_EVENT_COS_PRIORITY 0x2
42977+#define ASYNC_EVENT_PVID_STATE 0x3
42978+#define GRP5_TYPE_PRIO_TC_MAP 4
42979+
42980 struct be_async_event_trailer {
42981 u32 code;
42982 };
42983
42984 enum {
42985- ASYNC_EVENT_LINK_DOWN = 0x0,
42986- ASYNC_EVENT_LINK_UP = 0x1
42987+ ASYNC_EVENT_LINK_DOWN = 0x0,
42988+ ASYNC_EVENT_LINK_UP = 0x1,
42989+ ASYNC_EVENT_LOGICAL = 0x2
42990 };
42991
42992 /* When the event code of an async trailer is link-state, the mcc_compl
42993@@ -101,7 +109,51 @@ struct be_async_event_link_state {
42994 u8 port_duplex;
42995 u8 port_speed;
42996 u8 port_fault;
42997- u8 rsvd0[7];
42998+ u8 rsvd0;
42999+ u16 qos_link_speed;
43000+ u32 event_tag;
43001+ struct be_async_event_trailer trailer;
43002+} __packed;
43003+
43004+/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
43005+ * the mcc_compl must be interpreted as follows
43006+ */
43007+struct be_async_event_grp5_qos_link_speed {
43008+ u8 physical_port;
43009+ u8 rsvd[5];
43010+ u16 qos_link_speed;
43011+ u32 event_tag;
43012+ struct be_async_event_trailer trailer;
43013+} __packed;
43014+
43015+/* When the event code of an async trailer is GRP5 and event type is
43016+ * CoS-Priority, the mcc_compl must be interpreted as follows
43017+ */
43018+struct be_async_event_grp5_cos_priority {
43019+ u8 physical_port;
43020+ u8 available_priority_bmap;
43021+ u8 reco_default_priority;
43022+ u8 valid;
43023+ u8 rsvd0;
43024+ u8 event_tag;
43025+ struct be_async_event_trailer trailer;
43026+} __packed;
43027+
43028+/* When the event code of an async trailer is GRP5 and event type is
43029+ * PVID state, the mcc_compl must be interpreted as follows
43030+ */
43031+struct be_async_event_grp5_pvid_state {
43032+ u8 enabled;
43033+ u8 rsvd0;
43034+ u16 tag;
43035+ u32 event_tag;
43036+ u32 rsvd1;
43037+ struct be_async_event_trailer trailer;
43038+} __packed;
43039+
43040+/* GRP5 prio-tc-map event */
43041+struct be_async_event_grp5_prio_tc_map {
43042+ u8 prio_tc_map[8]; /* map[prio] -> tc_id */
43043 struct be_async_event_trailer trailer;
43044 } __packed;
43045
43046@@ -111,41 +163,68 @@ struct be_mcc_mailbox {
43047 };
43048
43049 #define CMD_SUBSYSTEM_COMMON 0x1
43050-#define CMD_SUBSYSTEM_ETH 0x3
43051+#define CMD_SUBSYSTEM_ETH 0x3
43052+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
43053
43054 #define OPCODE_COMMON_NTWK_MAC_QUERY 1
43055 #define OPCODE_COMMON_NTWK_MAC_SET 2
43056 #define OPCODE_COMMON_NTWK_MULTICAST_SET 3
43057-#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
43058+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
43059 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
43060+#define OPCODE_COMMON_READ_FLASHROM 6
43061 #define OPCODE_COMMON_WRITE_FLASHROM 7
43062 #define OPCODE_COMMON_CQ_CREATE 12
43063 #define OPCODE_COMMON_EQ_CREATE 13
43064-#define OPCODE_COMMON_MCC_CREATE 21
43065-#define OPCODE_COMMON_NTWK_RX_FILTER 34
43066+#define OPCODE_COMMON_MCC_CREATE 21
43067+#define OPCODE_COMMON_SET_QOS 28
43068+#define OPCODE_COMMON_MCC_CREATE_EXT 90
43069+#define OPCODE_COMMON_SEEPROM_READ 30
43070+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
43071+#define OPCODE_COMMON_NTWK_RX_FILTER 34
43072 #define OPCODE_COMMON_GET_FW_VERSION 35
43073 #define OPCODE_COMMON_SET_FLOW_CONTROL 36
43074 #define OPCODE_COMMON_GET_FLOW_CONTROL 37
43075 #define OPCODE_COMMON_SET_FRAME_SIZE 39
43076 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
43077 #define OPCODE_COMMON_FIRMWARE_CONFIG 42
43078-#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43079-#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43080-#define OPCODE_COMMON_MCC_DESTROY 53
43081-#define OPCODE_COMMON_CQ_DESTROY 54
43082-#define OPCODE_COMMON_EQ_DESTROY 55
43083+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43084+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43085+#define OPCODE_COMMON_MCC_DESTROY 53
43086+#define OPCODE_COMMON_CQ_DESTROY 54
43087+#define OPCODE_COMMON_EQ_DESTROY 55
43088+#define OPCODE_COMMON_NTWK_SET_LINK_SPEED 57
43089 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
43090 #define OPCODE_COMMON_NTWK_PMAC_ADD 59
43091 #define OPCODE_COMMON_NTWK_PMAC_DEL 60
43092 #define OPCODE_COMMON_FUNCTION_RESET 61
43093+#define OPCODE_COMMON_MANAGE_FAT 68
43094+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
43095+#define OPCODE_COMMON_GET_BEACON_STATE 70
43096+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
43097+#define OPCODE_COMMON_GET_PORT_NAME 77
43098+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
43099+#define OPCODE_COMMON_GET_PHY_DETAILS 102
43100+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
43101+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
43102+#define OPCODE_COMMON_NTWK_GET_LINK_SPEED 134
43103+#define OPCODE_COMMON_GET_HSW_CONFIG 152
43104+#define OPCODE_COMMON_SET_HSW_CONFIG 153
43105+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
43106
43107+#define OPCODE_ETH_RSS_CONFIG 1
43108 #define OPCODE_ETH_ACPI_CONFIG 2
43109 #define OPCODE_ETH_PROMISCUOUS 3
43110 #define OPCODE_ETH_GET_STATISTICS 4
43111 #define OPCODE_ETH_TX_CREATE 7
43112-#define OPCODE_ETH_RX_CREATE 8
43113-#define OPCODE_ETH_TX_DESTROY 9
43114-#define OPCODE_ETH_RX_DESTROY 10
43115+#define OPCODE_ETH_RX_CREATE 8
43116+#define OPCODE_ETH_TX_DESTROY 9
43117+#define OPCODE_ETH_RX_DESTROY 10
43118+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
43119+#define OPCODE_ETH_PG_FEATURE_QUERY_REQUEST 23
43120+
43121+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
43122+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
43123+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
43124
43125 struct be_cmd_req_hdr {
43126 u8 opcode; /* dword 0 */
43127@@ -159,7 +238,7 @@ struct be_cmd_req_hdr {
43128 };
43129
43130 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
43131-#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43132+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43133 struct be_cmd_resp_hdr {
43134 u32 info; /* dword 0 */
43135 u32 status; /* dword 1 */
43136@@ -265,7 +344,7 @@ struct be_cmd_req_pmac_del {
43137 /******************** Create CQ ***************************/
43138 /* Pseudo amap definition in which each bit of the actual structure is defined
43139 * as a byte: used to calculate offset/shift/mask of each field */
43140-struct amap_cq_context {
43141+struct amap_cq_context_be {
43142 u8 cidx[11]; /* dword 0*/
43143 u8 rsvd0; /* dword 0*/
43144 u8 coalescwm[2]; /* dword 0*/
43145@@ -288,11 +367,28 @@ struct amap_cq_context {
43146 u8 rsvd5[32]; /* dword 3*/
43147 } __packed;
43148
43149+struct amap_cq_context_lancer {
43150+ u8 rsvd0[12]; /* dword 0*/
43151+ u8 coalescwm[2]; /* dword 0*/
43152+ u8 nodelay; /* dword 0*/
43153+ u8 rsvd1[12]; /* dword 0*/
43154+ u8 count[2]; /* dword 0*/
43155+ u8 valid; /* dword 0*/
43156+ u8 rsvd2; /* dword 0*/
43157+ u8 eventable; /* dword 0*/
43158+ u8 eqid[16]; /* dword 1*/
43159+ u8 rsvd3[15]; /* dword 1*/
43160+ u8 armed; /* dword 1*/
43161+ u8 rsvd4[32]; /* dword 2*/
43162+ u8 rsvd5[32]; /* dword 3*/
43163+} __packed;
43164+
43165 struct be_cmd_req_cq_create {
43166 struct be_cmd_req_hdr hdr;
43167 u16 num_pages;
43168- u16 rsvd0;
43169- u8 context[sizeof(struct amap_cq_context) / 8];
43170+ u8 page_size;
43171+ u8 rsvd0;
43172+ u8 context[sizeof(struct amap_cq_context_be) / 8];
43173 struct phys_addr pages[8];
43174 } __packed;
43175
43176@@ -302,10 +398,28 @@ struct be_cmd_resp_cq_create {
43177 u16 rsvd0;
43178 } __packed;
43179
43180+struct be_cmd_req_get_fat {
43181+ struct be_cmd_req_hdr hdr;
43182+ u32 fat_operation;
43183+ u32 read_log_offset;
43184+ u32 read_log_length;
43185+ u32 data_buffer_size;
43186+ u32 data_buffer[1];
43187+} __packed;
43188+
43189+struct be_cmd_resp_get_fat {
43190+ struct be_cmd_resp_hdr hdr;
43191+ u32 log_size;
43192+ u32 read_log_length;
43193+ u32 rsvd[2];
43194+ u32 data_buffer[1];
43195+} __packed;
43196+
43197+
43198 /******************** Create MCCQ ***************************/
43199 /* Pseudo amap definition in which each bit of the actual structure is defined
43200 * as a byte: used to calculate offset/shift/mask of each field */
43201-struct amap_mcc_context {
43202+struct amap_mcc_context_be {
43203 u8 con_index[14];
43204 u8 rsvd0[2];
43205 u8 ring_size[4];
43206@@ -320,11 +434,31 @@ struct amap_mcc_context {
43207 u8 rsvd2[32];
43208 } __packed;
43209
43210+struct amap_mcc_context_lancer {
43211+ u8 async_cq_id[16];
43212+ u8 ring_size[4];
43213+ u8 rsvd0[12];
43214+ u8 rsvd1[31];
43215+ u8 valid;
43216+ u8 async_cq_valid[1];
43217+ u8 rsvd2[31];
43218+ u8 rsvd3[32];
43219+} __packed;
43220+
43221 struct be_cmd_req_mcc_create {
43222 struct be_cmd_req_hdr hdr;
43223 u16 num_pages;
43224- u16 rsvd0;
43225- u8 context[sizeof(struct amap_mcc_context) / 8];
43226+ u16 cq_id;
43227+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
43228+ struct phys_addr pages[8];
43229+} __packed;
43230+
43231+struct be_cmd_req_mcc_ext_create {
43232+ struct be_cmd_req_hdr hdr;
43233+ u16 num_pages;
43234+ u16 cq_id;
43235+ u32 async_event_bitmap[1];
43236+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
43237 struct phys_addr pages[8];
43238 } __packed;
43239
43240@@ -335,49 +469,32 @@ struct be_cmd_resp_mcc_create {
43241 } __packed;
43242
43243 /******************** Create TxQ ***************************/
43244-#define BE_ETH_TX_RING_TYPE_STANDARD 2
43245+#define ETX_QUEUE_TYPE_STANDARD 0x2
43246+#define ETX_QUEUE_TYPE_PRIORITY 0x10
43247 #define BE_ULP1_NUM 1
43248
43249-/* Pseudo amap definition in which each bit of the actual structure is defined
43250- * as a byte: used to calculate offset/shift/mask of each field */
43251-struct amap_tx_context {
43252- u8 rsvd0[16]; /* dword 0 */
43253- u8 tx_ring_size[4]; /* dword 0 */
43254- u8 rsvd1[26]; /* dword 0 */
43255- u8 pci_func_id[8]; /* dword 1 */
43256- u8 rsvd2[9]; /* dword 1 */
43257- u8 ctx_valid; /* dword 1 */
43258- u8 cq_id_send[16]; /* dword 2 */
43259- u8 rsvd3[16]; /* dword 2 */
43260- u8 rsvd4[32]; /* dword 3 */
43261- u8 rsvd5[32]; /* dword 4 */
43262- u8 rsvd6[32]; /* dword 5 */
43263- u8 rsvd7[32]; /* dword 6 */
43264- u8 rsvd8[32]; /* dword 7 */
43265- u8 rsvd9[32]; /* dword 8 */
43266- u8 rsvd10[32]; /* dword 9 */
43267- u8 rsvd11[32]; /* dword 10 */
43268- u8 rsvd12[32]; /* dword 11 */
43269- u8 rsvd13[32]; /* dword 12 */
43270- u8 rsvd14[32]; /* dword 13 */
43271- u8 rsvd15[32]; /* dword 14 */
43272- u8 rsvd16[32]; /* dword 15 */
43273-} __packed;
43274-
43275 struct be_cmd_req_eth_tx_create {
43276 struct be_cmd_req_hdr hdr;
43277 u8 num_pages;
43278 u8 ulp_num;
43279- u8 type;
43280- u8 bound_port;
43281- u8 context[sizeof(struct amap_tx_context) / 8];
43282+ u16 type;
43283+ u16 if_id;
43284+ u8 queue_size;
43285+ u8 rsvd1;
43286+ u32 rsvd2;
43287+ u16 cq_id;
43288+ u16 rsvd3;
43289+ u32 rsvd4[13];
43290 struct phys_addr pages[8];
43291 } __packed;
43292
43293 struct be_cmd_resp_eth_tx_create {
43294 struct be_cmd_resp_hdr hdr;
43295 u16 cid;
43296- u16 rsvd0;
43297+ u16 rid;
43298+ u32 db_offset;
43299+ u8 tc_id;
43300+ u8 rsvd0[3];
43301 } __packed;
43302
43303 /******************** Create RxQ ***************************/
43304@@ -396,7 +513,7 @@ struct be_cmd_req_eth_rx_create {
43305 struct be_cmd_resp_eth_rx_create {
43306 struct be_cmd_resp_hdr hdr;
43307 u16 id;
43308- u8 cpu_id;
43309+ u8 rss_id;
43310 u8 rsvd0;
43311 } __packed;
43312
43313@@ -429,14 +546,15 @@ enum be_if_flags {
43314 BE_IF_FLAGS_VLAN = 0x100,
43315 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
43316 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
43317- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
43318+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
43319+ BE_IF_FLAGS_MULTICAST = 0x1000
43320 };
43321
43322 /* An RX interface is an object with one or more MAC addresses and
43323 * filtering capabilities. */
43324 struct be_cmd_req_if_create {
43325 struct be_cmd_req_hdr hdr;
43326- u32 version; /* ignore currntly */
43327+ u32 version; /* ignore currently */
43328 u32 capability_flags;
43329 u32 enable_flags;
43330 u8 mac_addr[ETH_ALEN];
43331@@ -458,7 +576,7 @@ struct be_cmd_req_if_destroy {
43332 };
43333
43334 /*************** HW Stats Get **********************************/
43335-struct be_port_rxf_stats {
43336+struct be_port_rxf_stats_v0 {
43337 u32 rx_bytes_lsd; /* dword 0*/
43338 u32 rx_bytes_msd; /* dword 1*/
43339 u32 rx_total_frames; /* dword 2*/
43340@@ -527,8 +645,8 @@ struct be_port_rxf_stats {
43341 u32 rx_input_fifo_overflow; /* dword 65*/
43342 };
43343
43344-struct be_rxf_stats {
43345- struct be_port_rxf_stats port[2];
43346+struct be_rxf_stats_v0 {
43347+ struct be_port_rxf_stats_v0 port[2];
43348 u32 rx_drops_no_pbuf; /* dword 132*/
43349 u32 rx_drops_no_txpb; /* dword 133*/
43350 u32 rx_drops_no_erx_descr; /* dword 134*/
43351@@ -545,31 +663,51 @@ struct be_rxf_stats {
43352 u32 rx_drops_invalid_ring; /* dword 145*/
43353 u32 forwarded_packets; /* dword 146*/
43354 u32 rx_drops_mtu; /* dword 147*/
43355- u32 rsvd0[15];
43356+ u32 rsvd0[7];
43357+ u32 port0_jabber_events;
43358+ u32 port1_jabber_events;
43359+ u32 rsvd1[6];
43360 };
43361
43362-struct be_erx_stats {
43363+struct be_erx_stats_v0 {
43364 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
43365- u32 debug_wdma_sent_hold; /* dword 44*/
43366- u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
43367- u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
43368- u32 debug_pmem_pbuf_dealloc; /* dword 47*/
43369+ u32 rsvd[4];
43370 };
43371
43372-struct be_hw_stats {
43373- struct be_rxf_stats rxf;
43374+struct be_pmem_stats {
43375+ u32 eth_red_drops;
43376+ u32 rsvd[5];
43377+};
43378+
43379+struct be_hw_stats_v0 {
43380+ struct be_rxf_stats_v0 rxf;
43381 u32 rsvd[48];
43382- struct be_erx_stats erx;
43383+ struct be_erx_stats_v0 erx;
43384+ struct be_pmem_stats pmem;
43385 };
43386
43387-struct be_cmd_req_get_stats {
43388+struct be_cmd_req_get_stats_v0 {
43389 struct be_cmd_req_hdr hdr;
43390- u8 rsvd[sizeof(struct be_hw_stats)];
43391+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
43392 };
43393
43394-struct be_cmd_resp_get_stats {
43395+struct be_cmd_resp_get_stats_v0 {
43396 struct be_cmd_resp_hdr hdr;
43397- struct be_hw_stats hw_stats;
43398+ struct be_hw_stats_v0 hw_stats;
43399+};
43400+
43401+struct be_cmd_req_get_cntl_addnl_attribs {
43402+ struct be_cmd_req_hdr hdr;
43403+ u8 rsvd[8];
43404+};
43405+
43406+struct be_cmd_resp_get_cntl_addnl_attribs {
43407+ struct be_cmd_resp_hdr hdr;
43408+ u16 ipl_file_number;
43409+ u8 ipl_file_version;
43410+ u8 rsvd0;
43411+ u8 on_die_temperature; /* in degrees centigrade*/
43412+ u8 rsvd1[3];
43413 };
43414
43415 struct be_cmd_req_vlan_config {
43416@@ -581,30 +719,22 @@ struct be_cmd_req_vlan_config {
43417 u16 normal_vlan[64];
43418 } __packed;
43419
43420-struct be_cmd_req_promiscuous_config {
43421- struct be_cmd_req_hdr hdr;
43422- u8 port0_promiscuous;
43423- u8 port1_promiscuous;
43424- u16 rsvd0;
43425-} __packed;
43426-
43427+/******************** RX FILTER ******************************/
43428+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
43429 struct macaddr {
43430 u8 byte[ETH_ALEN];
43431 };
43432
43433-struct be_cmd_req_mcast_mac_config {
43434+struct be_cmd_req_rx_filter {
43435 struct be_cmd_req_hdr hdr;
43436- u16 num_mac;
43437- u8 promiscuous;
43438- u8 interface_id;
43439- struct macaddr mac[32];
43440-} __packed;
43441-
43442-static inline struct be_hw_stats *
43443-hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
43444-{
43445- return &cmd->hw_stats;
43446-}
43447+ u32 global_flags_mask;
43448+ u32 global_flags;
43449+ u32 if_flags_mask;
43450+ u32 if_flags;
43451+ u32 if_id;
43452+ u32 mcast_num;
43453+ struct macaddr mcast_mac[BE_MAX_MC];
43454+};
43455
43456 /******************** Link Status Query *******************/
43457 struct be_cmd_req_link_status {
43458@@ -619,13 +749,18 @@ enum {
43459 };
43460
43461 enum {
43462- PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43463+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43464 PHY_LINK_SPEED_10MBPS = 0x1,
43465 PHY_LINK_SPEED_100MBPS = 0x2,
43466 PHY_LINK_SPEED_1GBPS = 0x3,
43467 PHY_LINK_SPEED_10GBPS = 0x4
43468 };
43469
43470+enum {
43471+ LINK_DOWN = 0x0,
43472+ LINK_UP = 0X1
43473+};
43474+
43475 struct be_cmd_resp_link_status {
43476 struct be_cmd_resp_hdr hdr;
43477 u8 physical_port;
43478@@ -634,9 +769,47 @@ struct be_cmd_resp_link_status {
43479 u8 mac_fault;
43480 u8 mgmt_mac_duplex;
43481 u8 mgmt_mac_speed;
43482- u16 rsvd0;
43483+ u16 link_speed;
43484+ u32 logical_link_status;
43485 } __packed;
43486
43487+/******************** Port Identification ***************************/
43488+/* Identifies the type of port attached to NIC */
43489+struct be_cmd_req_port_type {
43490+ struct be_cmd_req_hdr hdr;
43491+ u32 page_num;
43492+ u32 port;
43493+};
43494+
43495+enum {
43496+ TR_PAGE_A0 = 0xa0,
43497+ TR_PAGE_A2 = 0xa2
43498+};
43499+
43500+struct be_cmd_resp_port_type {
43501+ struct be_cmd_resp_hdr hdr;
43502+ u32 page_num;
43503+ u32 port;
43504+ struct data {
43505+ u8 identifier;
43506+ u8 identifier_ext;
43507+ u8 connector;
43508+ u8 transceiver[8];
43509+ u8 rsvd0[3];
43510+ u8 length_km;
43511+ u8 length_hm;
43512+ u8 length_om1;
43513+ u8 length_om2;
43514+ u8 length_cu;
43515+ u8 length_cu_m;
43516+ u8 vendor_name[16];
43517+ u8 rsvd;
43518+ u8 vendor_oui[3];
43519+ u8 vendor_pn[16];
43520+ u8 vendor_rev[4];
43521+ } data;
43522+};
43523+
43524 /******************** Get FW Version *******************/
43525 struct be_cmd_req_get_fw_version {
43526 struct be_cmd_req_hdr hdr;
43527@@ -686,9 +859,13 @@ struct be_cmd_resp_modify_eq_delay {
43528 } __packed;
43529
43530 /******************** Get FW Config *******************/
43531+#define FLEX10_MODE 0x400
43532+#define VNIC_MODE 0x20000
43533+#define UMC_ENABLED 0x1000000
43534+
43535 struct be_cmd_req_query_fw_cfg {
43536 struct be_cmd_req_hdr hdr;
43537- u32 rsvd[30];
43538+ u32 rsvd[31];
43539 };
43540
43541 struct be_cmd_resp_query_fw_cfg {
43542@@ -696,10 +873,61 @@ struct be_cmd_resp_query_fw_cfg {
43543 u32 be_config_number;
43544 u32 asic_revision;
43545 u32 phys_port;
43546- u32 function_cap;
43547+ u32 function_mode;
43548 u32 rsvd[26];
43549+ u32 function_caps;
43550 };
43551
43552+/******************** RSS Config *******************/
43553+/* RSS types */
43554+#define RSS_ENABLE_NONE 0x0
43555+#define RSS_ENABLE_IPV4 0x1
43556+#define RSS_ENABLE_TCP_IPV4 0x2
43557+#define RSS_ENABLE_IPV6 0x4
43558+#define RSS_ENABLE_TCP_IPV6 0x8
43559+
43560+struct be_cmd_req_rss_config {
43561+ struct be_cmd_req_hdr hdr;
43562+ u32 if_id;
43563+ u16 enable_rss;
43564+ u16 cpu_table_size_log2;
43565+ u32 hash[10];
43566+ u8 cpu_table[128];
43567+ u8 flush;
43568+ u8 rsvd0[3];
43569+};
43570+
43571+/******************** Port Beacon ***************************/
43572+
43573+#define BEACON_STATE_ENABLED 0x1
43574+#define BEACON_STATE_DISABLED 0x0
43575+
43576+struct be_cmd_req_enable_disable_beacon {
43577+ struct be_cmd_req_hdr hdr;
43578+ u8 port_num;
43579+ u8 beacon_state;
43580+ u8 beacon_duration;
43581+ u8 status_duration;
43582+} __packed;
43583+
43584+struct be_cmd_resp_enable_disable_beacon {
43585+ struct be_cmd_resp_hdr resp_hdr;
43586+ u32 rsvd0;
43587+} __packed;
43588+
43589+struct be_cmd_req_get_beacon_state {
43590+ struct be_cmd_req_hdr hdr;
43591+ u8 port_num;
43592+ u8 rsvd0;
43593+ u16 rsvd1;
43594+} __packed;
43595+
43596+struct be_cmd_resp_get_beacon_state {
43597+ struct be_cmd_resp_hdr resp_hdr;
43598+ u8 beacon_state;
43599+ u8 rsvd0[3];
43600+} __packed;
43601+
43602 /****************** Firmware Flash ******************/
43603 struct flashrom_params {
43604 u32 op_code;
43605@@ -714,17 +942,468 @@ struct be_cmd_write_flashrom {
43606 struct flashrom_params params;
43607 };
43608
43609+/************************ WOL *******************************/
43610+struct be_cmd_req_acpi_wol_magic_config {
43611+ struct be_cmd_req_hdr hdr;
43612+ u32 rsvd0[145];
43613+ u8 magic_mac[6];
43614+ u8 rsvd2[2];
43615+} __packed;
43616+
43617+/********************** LoopBack test *********************/
43618+struct be_cmd_req_loopback_test {
43619+ struct be_cmd_req_hdr hdr;
43620+ u32 loopback_type;
43621+ u32 num_pkts;
43622+ u64 pattern;
43623+ u32 src_port;
43624+ u32 dest_port;
43625+ u32 pkt_size;
43626+};
43627+
43628+struct be_cmd_resp_loopback_test {
43629+ struct be_cmd_resp_hdr resp_hdr;
43630+ u32 status;
43631+ u32 num_txfer;
43632+ u32 num_rx;
43633+ u32 miscomp_off;
43634+ u32 ticks_compl;
43635+};
43636+
43637+struct be_cmd_req_set_lmode {
43638+ struct be_cmd_req_hdr hdr;
43639+ u8 src_port;
43640+ u8 dest_port;
43641+ u8 loopback_type;
43642+ u8 loopback_state;
43643+};
43644+
43645+struct be_cmd_resp_set_lmode {
43646+ struct be_cmd_resp_hdr resp_hdr;
43647+ u8 rsvd0[4];
43648+};
43649+
43650+/********************** DDR DMA test *********************/
43651+struct be_cmd_req_ddrdma_test {
43652+ struct be_cmd_req_hdr hdr;
43653+ u64 pattern;
43654+ u32 byte_count;
43655+ u32 rsvd0;
43656+ u8 snd_buff[4096];
43657+ u8 rsvd1[4096];
43658+};
43659+
43660+struct be_cmd_resp_ddrdma_test {
43661+ struct be_cmd_resp_hdr hdr;
43662+ u64 pattern;
43663+ u32 byte_cnt;
43664+ u32 snd_err;
43665+ u8 rsvd0[4096];
43666+ u8 rcv_buff[4096];
43667+};
43668+
43669+/*********************** SEEPROM Read ***********************/
43670+
43671+#define BE_READ_SEEPROM_LEN 1024
43672+struct be_cmd_req_seeprom_read {
43673+ struct be_cmd_req_hdr hdr;
43674+ u8 rsvd0[BE_READ_SEEPROM_LEN];
43675+};
43676+
43677+struct be_cmd_resp_seeprom_read {
43678+ struct be_cmd_req_hdr hdr;
43679+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
43680+};
43681+
43682+enum {
43683+ PHY_TYPE_CX4_10GB = 0,
43684+ PHY_TYPE_XFP_10GB,
43685+ PHY_TYPE_SFP_1GB,
43686+ PHY_TYPE_SFP_PLUS_10GB,
43687+ PHY_TYPE_KR_10GB,
43688+ PHY_TYPE_KX4_10GB,
43689+ PHY_TYPE_BASET_10GB,
43690+ PHY_TYPE_BASET_1GB,
43691+ PHY_TYPE_BASEX_1GB,
43692+ PHY_TYPE_SGMII,
43693+ PHY_TYPE_DISABLED = 255
43694+};
43695+
43696+#define BE_AN_EN 0x2
43697+#define BE_PAUSE_SYM_EN 0x80
43698+
43699+struct be_cmd_req_get_phy_info {
43700+ struct be_cmd_req_hdr hdr;
43701+ u8 rsvd0[24];
43702+};
43703+
43704+struct be_phy_info {
43705+ u16 phy_type;
43706+ u16 interface_type;
43707+ u32 misc_params;
43708+ u16 ext_phy_details;
43709+ u16 rsvd;
43710+ u16 auto_speeds_supported;
43711+ u16 fixed_speeds_supported;
43712+ u32 future_use[2];
43713+};
43714+
43715+struct be_cmd_resp_get_phy_info {
43716+ struct be_cmd_req_hdr hdr;
43717+ struct be_phy_info phy_info;
43718+};
43719+
43720+/*********************** Set QOS ***********************/
43721+
43722+#define BE_QOS_BITS_NIC 1
43723+
43724+struct be_cmd_req_set_qos {
43725+ struct be_cmd_req_hdr hdr;
43726+ u32 valid_bits;
43727+ u32 max_bps_nic;
43728+ u32 rsvd[7];
43729+};
43730+
43731+struct be_cmd_resp_set_qos {
43732+ struct be_cmd_resp_hdr hdr;
43733+ u32 rsvd;
43734+};
43735+
43736+/*********************** Controller Attributes ***********************/
43737+struct be_cmd_req_cntl_attribs {
43738+ struct be_cmd_req_hdr hdr;
43739+};
43740+
43741+struct be_cmd_resp_cntl_attribs {
43742+ struct be_cmd_resp_hdr hdr;
43743+ struct mgmt_controller_attrib attribs;
43744+};
43745+
43746+/******************* get port names ***************/
43747+struct be_cmd_req_get_port_name {
43748+ struct be_cmd_req_hdr hdr;
43749+ u32 rsvd0;
43750+};
43751+
43752+struct be_cmd_resp_get_port_name {
43753+ struct be_cmd_req_hdr hdr;
43754+ u8 port0_name;
43755+ u8 port1_name;
43756+ u8 rsvd0[2];
43757+};
43758+
43759+struct be_cmd_resp_get_port_name_v1 {
43760+ struct be_cmd_req_hdr hdr;
43761+ u32 pt : 2;
43762+ u32 rsvd0 : 30;
43763+ u8 port0_name;
43764+ u8 port1_name;
43765+ u8 port2_name;
43766+ u8 port3_name;
43767+};
43768+
43769+/*********************** Set driver function ***********************/
43770+#define CAPABILITY_SW_TIMESTAMPS 2
43771+#define CAPABILITY_BE3_NATIVE_ERX_API 4
43772+
43773+struct be_cmd_req_set_func_cap {
43774+ struct be_cmd_req_hdr hdr;
43775+ u32 valid_cap_flags;
43776+ u32 cap_flags;
43777+ u8 rsvd[212];
43778+};
43779+
43780+struct be_cmd_resp_set_func_cap {
43781+ struct be_cmd_resp_hdr hdr;
43782+ u32 valid_cap_flags;
43783+ u32 cap_flags;
43784+ u8 rsvd[212];
43785+};
43786+
43787+/*********************** PG Query Request ****************************/
43788+#define REQ_PG_QUERY 0x1
43789+#define REQ_PG_FEAT 0x1
43790+struct be_cmd_req_pg {
43791+ struct be_cmd_req_hdr hdr;
43792+ u32 query;
43793+ u32 pfc_pg;
43794+};
43795+
43796+struct be_cmd_resp_pg {
43797+ struct be_cmd_resp_hdr hdr;
43798+ u32 pfc_pg;
43799+ u32 num_tx_rings;
43800+};
43801+
43802+/*********************** Function Privileges ***********************/
43803+enum {
43804+ BE_PRIV_DEFAULT = 0x1,
43805+ BE_PRIV_LNKQUERY = 0x2,
43806+ BE_PRIV_LNKSTATS = 0x4,
43807+ BE_PRIV_LNKMGMT = 0x8,
43808+ BE_PRIV_LNKDIAG = 0x10,
43809+ BE_PRIV_UTILQUERY = 0x20,
43810+ BE_PRIV_FILTMGMT = 0x40,
43811+ BE_PRIV_IFACEMGMT = 0x80,
43812+ BE_PRIV_VHADM = 0x100,
43813+ BE_PRIV_DEVCFG = 0x200,
43814+ BE_PRIV_DEVSEC = 0x400
43815+};
43816+
43817+struct be_cmd_req_get_fn_privileges {
43818+ struct be_cmd_req_hdr hdr;
43819+ u32 rsvd;
43820+};
43821+
43822+struct be_cmd_resp_get_fn_privileges {
43823+ struct be_cmd_resp_hdr hdr;
43824+ u32 privilege_mask;
43825+};
43826+
43827+struct be_cmd_req_set_fn_privileges {
43828+ struct be_cmd_req_hdr hdr;
43829+ u32 privilege_mask;
43830+};
43831+
43832+struct be_cmd_resp_set_fn_privileges {
43833+ struct be_cmd_resp_hdr hdr;
43834+ u32 prev_privilege_mask;
43835+};
43836+
43837+/*********************** HSW Config ***********************/
43838+struct amap_set_hsw_context {
43839+ u8 interface_id[16];
43840+ u8 rsvd0[14];
43841+ u8 pvid_valid;
43842+ u8 rsvd1;
43843+ u8 rsvd2[16];
43844+ u8 pvid[16];
43845+ u8 rsvd3[32];
43846+ u8 rsvd4[32];
43847+ u8 rsvd5[32];
43848+} __packed;
43849+
43850+struct be_cmd_req_set_hsw_config {
43851+ struct be_cmd_req_hdr hdr;
43852+ u8 context[sizeof(struct amap_set_hsw_context) / 8];
43853+} __packed;
43854+
43855+struct be_cmd_resp_set_hsw_config {
43856+ struct be_cmd_resp_hdr hdr;
43857+ u32 rsvd;
43858+};
43859+
43860+struct amap_get_hsw_req_context {
43861+ u8 interface_id[16];
43862+ u8 rsvd0[14];
43863+ u8 pvid_valid;
43864+ u8 pport;
43865+} __packed;
43866+
43867+struct amap_get_hsw_resp_context {
43868+ u8 rsvd1[16];
43869+ u8 pvid[16];
43870+ u8 rsvd2[32];
43871+ u8 rsvd3[32];
43872+ u8 rsvd4[32];
43873+} __packed;
43874+
43875+struct be_cmd_req_get_hsw_config {
43876+ struct be_cmd_req_hdr hdr;
43877+ u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
43878+} __packed;
43879+
43880+struct be_cmd_resp_get_hsw_config {
43881+ struct be_cmd_resp_hdr hdr;
43882+ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
43883+ u32 rsvd;
43884+};
43885+
43886+/*************** Set speed ********************/
43887+struct be_cmd_req_set_port_speed_v1 {
43888+ struct be_cmd_req_hdr hdr;
43889+ u8 port_num;
43890+ u8 virt_port;
43891+ u16 mac_speed;
43892+ u16 dac_cable_length;
43893+ u16 rsvd0;
43894+};
43895+
43896+struct be_cmd_resp_set_port_speed_v1 {
43897+ struct be_cmd_resp_hdr hdr;
43898+ u32 rsvd0;
43899+};
43900+
43901+/************** get port speed *******************/
43902+struct be_cmd_req_get_port_speed {
43903+ struct be_cmd_req_hdr hdr;
43904+ u8 port_num;
43905+};
43906+
43907+struct be_cmd_resp_get_port_speed {
43908+ struct be_cmd_req_hdr hdr;
43909+ u16 mac_speed;
43910+ u16 dac_cable_length;
43911+};
43912+
43913+/*************** HW Stats Get v1 **********************************/
43914+#define BE_TXP_SW_SZ 48
43915+struct be_port_rxf_stats_v1 {
43916+ u32 rsvd0[12];
43917+ u32 rx_crc_errors;
43918+ u32 rx_alignment_symbol_errors;
43919+ u32 rx_pause_frames;
43920+ u32 rx_priority_pause_frames;
43921+ u32 rx_control_frames;
43922+ u32 rx_in_range_errors;
43923+ u32 rx_out_range_errors;
43924+ u32 rx_frame_too_long;
43925+ u32 rx_address_match_errors;
43926+ u32 rx_dropped_too_small;
43927+ u32 rx_dropped_too_short;
43928+ u32 rx_dropped_header_too_small;
43929+ u32 rx_dropped_tcp_length;
43930+ u32 rx_dropped_runt;
43931+ u32 rsvd1[10];
43932+ u32 rx_ip_checksum_errs;
43933+ u32 rx_tcp_checksum_errs;
43934+ u32 rx_udp_checksum_errs;
43935+ u32 rsvd2[7];
43936+ u32 rx_switched_unicast_packets;
43937+ u32 rx_switched_multicast_packets;
43938+ u32 rx_switched_broadcast_packets;
43939+ u32 rsvd3[3];
43940+ u32 tx_pauseframes;
43941+ u32 tx_priority_pauseframes;
43942+ u32 tx_controlframes;
43943+ u32 rsvd4[10];
43944+ u32 rxpp_fifo_overflow_drop;
43945+ u32 rx_input_fifo_overflow_drop;
43946+ u32 pmem_fifo_overflow_drop;
43947+ u32 jabber_events;
43948+ u32 rsvd5[3];
43949+};
43950+
43951+
43952+struct be_rxf_stats_v1 {
43953+ struct be_port_rxf_stats_v1 port[4];
43954+ u32 rsvd0[2];
43955+ u32 rx_drops_no_pbuf;
43956+ u32 rx_drops_no_txpb;
43957+ u32 rx_drops_no_erx_descr;
43958+ u32 rx_drops_no_tpre_descr;
43959+ u32 rsvd1[6];
43960+ u32 rx_drops_too_many_frags;
43961+ u32 rx_drops_invalid_ring;
43962+ u32 forwarded_packets;
43963+ u32 rx_drops_mtu;
43964+ u32 rsvd2[14];
43965+};
43966+
43967+struct be_erx_stats_v1 {
43968+ u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
43969+ u32 rsvd[4];
43970+};
43971+
43972+struct be_hw_stats_v1 {
43973+ struct be_rxf_stats_v1 rxf;
43974+ u32 rsvd0[BE_TXP_SW_SZ];
43975+ struct be_erx_stats_v1 erx;
43976+ struct be_pmem_stats pmem;
43977+ u32 rsvd1[3];
43978+};
43979+
43980+struct be_cmd_req_get_stats_v1 {
43981+ struct be_cmd_req_hdr hdr;
43982+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
43983+};
43984+
43985+struct be_cmd_resp_get_stats_v1 {
43986+ struct be_cmd_resp_hdr hdr;
43987+ struct be_hw_stats_v1 hw_stats;
43988+};
43989+
43990+static inline void *
43991+hw_stats_from_cmd(struct be_adapter *adapter)
43992+{
43993+ if (adapter->generation == BE_GEN3) {
43994+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
43995+
43996+ return &cmd->hw_stats;
43997+ } else {
43998+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
43999+
44000+ return &cmd->hw_stats;
44001+ }
44002+}
44003+
44004+static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
44005+{
44006+ if (adapter->generation == BE_GEN3) {
44007+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44008+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
44009+
44010+ return &rxf_stats->port[adapter->port_num];
44011+ } else {
44012+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44013+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
44014+
44015+ return &rxf_stats->port[adapter->port_num];
44016+ }
44017+}
44018+
44019+static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
44020+{
44021+ if (adapter->generation == BE_GEN3) {
44022+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44023+
44024+ return &hw_stats->rxf;
44025+ } else {
44026+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44027+
44028+ return &hw_stats->rxf;
44029+ }
44030+}
44031+
44032+static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
44033+{
44034+ if (adapter->generation == BE_GEN3) {
44035+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44036+
44037+ return &hw_stats->erx;
44038+ } else {
44039+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44040+
44041+ return &hw_stats->erx;
44042+ }
44043+}
44044+
44045+static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
44046+{
44047+ if (adapter->generation == BE_GEN3) {
44048+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44049+
44050+ return &hw_stats->pmem;
44051+ } else {
44052+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44053+
44054+ return &hw_stats->pmem;
44055+ }
44056+}
44057+
44058 extern int be_pci_fnum_get(struct be_adapter *adapter);
44059 extern int be_cmd_POST(struct be_adapter *adapter);
44060 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
44061 u8 type, bool permanent, u32 if_handle);
44062 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
44063- u32 if_id, u32 *pmac_id);
44064-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
44065+ u32 if_id, u32 *pmac_id, u32 domain);
44066+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id,
44067+ u32 domain);
44068 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
44069 u32 en_flags, u8 *mac, bool pmac_invalid,
44070- u32 *if_handle, u32 *pmac_id);
44071-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
44072+ u32 *if_handle, u32 *pmac_id, u32 domain);
44073+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
44074+ u32 domain);
44075 extern int be_cmd_eq_create(struct be_adapter *adapter,
44076 struct be_queue_info *eq, int eq_delay);
44077 extern int be_cmd_cq_create(struct be_adapter *adapter,
44078@@ -736,36 +1415,92 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
44079 struct be_queue_info *cq);
44080 extern int be_cmd_txq_create(struct be_adapter *adapter,
44081 struct be_queue_info *txq,
44082- struct be_queue_info *cq);
44083+ struct be_queue_info *cq, u8 *tc_id);
44084 extern int be_cmd_rxq_create(struct be_adapter *adapter,
44085 struct be_queue_info *rxq, u16 cq_id,
44086 u16 frag_size, u16 max_frame_size, u32 if_id,
44087- u32 rss);
44088+ u32 rss, u8 *rss_id);
44089 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
44090 int type);
44091+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
44092+ struct be_queue_info *q);
44093 extern int be_cmd_link_status_query(struct be_adapter *adapter,
44094- bool *link_up);
44095+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom);
44096 extern int be_cmd_reset(struct be_adapter *adapter);
44097 extern int be_cmd_get_stats(struct be_adapter *adapter,
44098 struct be_dma_mem *nonemb_cmd);
44099-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
44100+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
44101+ char *fw_on_flash);
44102
44103 extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
44104 extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
44105 u16 *vtag_array, u32 num, bool untagged,
44106 bool promiscuous);
44107-extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
44108- u8 port_num, bool en);
44109-extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
44110- struct dev_mc_list *mc_list, u32 mc_count);
44111+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
44112 extern int be_cmd_set_flow_control(struct be_adapter *adapter,
44113 u32 tx_fc, u32 rx_fc);
44114 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
44115 u32 *tx_fc, u32 *rx_fc);
44116-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
44117- u32 *port_num, u32 *cap);
44118+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
44119+ u32 *function_mode, u32 *functions_caps);
44120 extern int be_cmd_reset_function(struct be_adapter *adapter);
44121-extern int be_process_mcc(struct be_adapter *adapter);
44122+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
44123+ u16 table_size);
44124+extern int be_process_mcc(struct be_adapter *adapter, int *status);
44125+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
44126+ u8 port_num, u8 beacon, u8 status, u8 state);
44127+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
44128+ u8 port_num, u32 *state);
44129+extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
44130+ u8 *connector);
44131 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
44132 struct be_dma_mem *cmd, u32 flash_oper,
44133 u32 flash_opcode, u32 buf_size);
44134+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
44135+ int offset);
44136+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
44137+ struct be_dma_mem *nonemb_cmd);
44138+extern int be_cmd_fw_init(struct be_adapter *adapter);
44139+extern int be_cmd_fw_clean(struct be_adapter *adapter);
44140+extern void be_async_mcc_enable(struct be_adapter *adapter);
44141+extern void be_async_mcc_disable(struct be_adapter *adapter);
44142+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
44143+ u32 loopback_type, u32 pkt_size,
44144+ u32 num_pkts, u64 pattern);
44145+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
44146+ u32 byte_cnt, struct be_dma_mem *cmd);
44147+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
44148+ struct be_dma_mem *nonemb_cmd);
44149+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
44150+ u8 loopback_type, u8 enable);
44151+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
44152+ struct be_phy_info *phy_info);
44153+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
44154+extern void be_detect_dump_ue(struct be_adapter *adapter);
44155+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
44156+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
44157+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
44158+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
44159+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
44160+extern int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name);
44161+extern int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name);
44162+extern int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs);
44163+
44164+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
44165+ u32 *privilege, u32 domain);
44166+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
44167+ u32 mask, u32 *prev, u32 domain);
44168+extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
44169+ u32 domain, u16 intf_id);
44170+extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
44171+ u32 domain, u16 intf_id);
44172+extern int be_cmd_set_port_speed_v1(struct be_adapter *adapter, u8 port_num,
44173+ u16 mac_speed, u16 dac_cable_len);
44174+extern int be_cmd_get_port_speed(struct be_adapter *adapter, u8 port_num,
44175+ u16 *dac_cable_len, u16 *port_speed);
44176+#ifdef CONFIG_PALAU
44177+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
44178+ int req_size, void *va);
44179+#endif
44180+
44181+#endif /* !BE_CMDS_H */
44182diff --git a/drivers/net/benet/be_compat.c b/drivers/net/benet/be_compat.c
44183new file mode 100644
44184index 0000000..bdd1dba
44185--- /dev/null
44186+++ b/drivers/net/benet/be_compat.c
44187@@ -0,0 +1,630 @@
44188+/*
44189+ * Copyright (C) 2005 - 2011 Emulex
44190+ * All rights reserved.
44191+ *
44192+ * This program is free software; you can redistribute it and/or
44193+ * modify it under the terms of the GNU General Public License version 2
44194+ * as published by the Free Software Foundation. The full GNU General
44195+ * Public License is included in this distribution in the file called COPYING.
44196+ *
44197+ * Contact Information:
44198+ * linux-drivers@emulex.com
44199+ *
44200+ * Emulex
44201+ * 3333 Susan Street
44202+ * Costa Mesa, CA 92626
44203+ */
44204+
44205+#include "be.h"
44206+
44207+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44208+void be_netdev_ops_init(struct net_device *netdev, struct net_device_ops *ops)
44209+{
44210+ netdev->open = ops->ndo_open;
44211+ netdev->stop = ops->ndo_stop;
44212+ netdev->hard_start_xmit = ops->ndo_start_xmit;
44213+ netdev->set_mac_address = ops->ndo_set_mac_address;
44214+ netdev->get_stats = ops->ndo_get_stats;
44215+ netdev->set_multicast_list = ops->ndo_set_rx_mode;
44216+ netdev->change_mtu = ops->ndo_change_mtu;
44217+ netdev->vlan_rx_register = ops->ndo_vlan_rx_register;
44218+ netdev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
44219+ netdev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
44220+ netdev->do_ioctl = ops->ndo_do_ioctl;
44221+#ifdef CONFIG_NET_POLL_CONTROLLER
44222+ netdev->poll_controller = ops->ndo_poll_controller;
44223+#endif
44224+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
44225+ netdev->select_queue = ops->ndo_select_queue;
44226+#endif
44227+}
44228+#endif
44229+
44230+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44231+int eth_validate_addr(struct net_device *netdev)
44232+{
44233+ return 0;
44234+}
44235+#endif
44236+
44237+/* New NAPI backport */
44238+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
44239+
44240+int be_poll_compat(struct net_device *netdev, int *budget)
44241+{
44242+ struct napi_struct *napi = netdev->priv;
44243+ u32 work_done, can_do;
44244+
44245+ can_do = min(*budget, netdev->quota);
44246+ work_done = napi->poll(napi, can_do);
44247+
44248+ *budget -= work_done;
44249+ netdev->quota -= work_done;
44250+ if (napi->rx)
44251+ return (work_done >= can_do);
44252+ return 0;
44253+}
44254+
44255+
44256+#endif /* New NAPI backport */
44257+
44258+int be_netif_napi_add(struct net_device *netdev,
44259+ struct napi_struct *napi,
44260+ int (*poll) (struct napi_struct *, int), int weight)
44261+{
44262+#ifdef HAVE_SIMULATED_MULTI_NAPI
44263+ struct be_adapter *adapter = netdev_priv(netdev);
44264+ struct net_device *nd;
44265+
44266+ nd = alloc_netdev(0, "", ether_setup);
44267+ if (!nd)
44268+ return -ENOMEM;
44269+ nd->priv = napi;
44270+ nd->weight = BE_NAPI_WEIGHT;
44271+ nd->poll = be_poll_compat;
44272+ set_bit(__LINK_STATE_START, &nd->state);
44273+
44274+ if (napi == &adapter->rx_obj[0].rx_eq.napi)
44275+ napi->rx = true;
44276+ napi->poll = poll;
44277+ napi->dev = nd;
44278+#ifdef RHEL_NEW_NAPI
44279+ napi->napi.dev = netdev;
44280+#endif
44281+ return 0;
44282+#else
44283+ netif_napi_add(netdev, napi, poll, weight);
44284+ return 0;
44285+#endif
44286+}
44287+void be_netif_napi_del(struct net_device *netdev)
44288+{
44289+#ifdef HAVE_SIMULATED_MULTI_NAPI
44290+ struct be_adapter *adapter = netdev_priv(netdev);
44291+ struct napi_struct *napi;
44292+ struct be_rx_obj *rxo;
44293+ int i;
44294+
44295+ for_all_rx_queues(adapter, rxo, i) {
44296+ napi = &rxo->rx_eq.napi;
44297+ if (napi->dev) {
44298+ free_netdev(napi->dev);
44299+ napi->dev = NULL;
44300+ }
44301+ }
44302+
44303+ napi = &adapter->tx_eq.napi;
44304+ if (napi->dev) {
44305+ free_netdev(napi->dev);
44306+ napi->dev = NULL;
44307+ }
44308+#endif
44309+}
44310+/* INET_LRO backport */
44311+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
44312+
44313+#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
44314+#define IP_HDR_LEN(iph) (iph->ihl << 2)
44315+#define TCP_PAYLOAD_LENGTH(iph, tcph) (ntohs(iph->tot_len) - IP_HDR_LEN(iph) \
44316+ - TCP_HDR_LEN(tcph))
44317+
44318+#define IPH_LEN_WO_OPTIONS 5
44319+#define TCPH_LEN_WO_OPTIONS 5
44320+#define TCPH_LEN_W_TIMESTAMP 8
44321+
44322+#define LRO_MAX_PG_HLEN 64
44323+#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
44324+/*
44325+ * Basic tcp checks whether packet is suitable for LRO
44326+ */
44327+static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
44328+ int len, struct net_lro_desc *lro_desc)
44329+{
44330+ /* check ip header: don't aggregate padded frames */
44331+ if (ntohs(iph->tot_len) != len)
44332+ return -1;
44333+
44334+ if (iph->ihl != IPH_LEN_WO_OPTIONS)
44335+ return -1;
44336+
44337+ if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
44338+ || tcph->rst || tcph->syn || tcph->fin)
44339+ return -1;
44340+
44341+ if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
44342+ return -1;
44343+
44344+ if (tcph->doff != TCPH_LEN_WO_OPTIONS
44345+ && tcph->doff != TCPH_LEN_W_TIMESTAMP)
44346+ return -1;
44347+
44348+ /* check tcp options (only timestamp allowed) */
44349+ if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
44350+ u32 *topt = (u32 *)(tcph + 1);
44351+
44352+ if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
44353+ | (TCPOPT_TIMESTAMP << 8)
44354+ | TCPOLEN_TIMESTAMP))
44355+ return -1;
44356+
44357+ /* timestamp should be in right order */
44358+ topt++;
44359+ if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
44360+ ntohl(*topt)))
44361+ return -1;
44362+
44363+ /* timestamp reply should not be zero */
44364+ topt++;
44365+ if (*topt == 0)
44366+ return -1;
44367+ }
44368+
44369+ return 0;
44370+}
44371+
44372+static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
44373+{
44374+ struct iphdr *iph = lro_desc->iph;
44375+ struct tcphdr *tcph = lro_desc->tcph;
44376+ u32 *p;
44377+ __wsum tcp_hdr_csum;
44378+
44379+ tcph->ack_seq = lro_desc->tcp_ack;
44380+ tcph->window = lro_desc->tcp_window;
44381+
44382+ if (lro_desc->tcp_saw_tstamp) {
44383+ p = (u32 *)(tcph + 1);
44384+ *(p+2) = lro_desc->tcp_rcv_tsecr;
44385+ }
44386+
44387+ iph->tot_len = htons(lro_desc->ip_tot_len);
44388+
44389+ iph->check = 0;
44390+ iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
44391+
44392+ tcph->check = 0;
44393+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
44394+ lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
44395+ tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
44396+ lro_desc->ip_tot_len -
44397+ IP_HDR_LEN(iph), IPPROTO_TCP,
44398+ lro_desc->data_csum);
44399+}
44400+
44401+static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
44402+{
44403+ __wsum tcp_csum;
44404+ __wsum tcp_hdr_csum;
44405+ __wsum tcp_ps_hdr_csum;
44406+
44407+ tcp_csum = ~csum_unfold(tcph->check);
44408+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
44409+
44410+ tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
44411+ len + TCP_HDR_LEN(tcph),
44412+ IPPROTO_TCP, 0);
44413+
44414+ return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
44415+ tcp_ps_hdr_csum);
44416+}
44417+
44418+static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
44419+ struct iphdr *iph, struct tcphdr *tcph,
44420+ u16 vlan_tag, struct vlan_group *vgrp)
44421+{
44422+ int nr_frags;
44423+ u32 *ptr;
44424+ u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44425+
44426+ nr_frags = skb_shinfo(skb)->nr_frags;
44427+ lro_desc->parent = skb;
44428+ lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
44429+ lro_desc->iph = iph;
44430+ lro_desc->tcph = tcph;
44431+ lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
44432+ lro_desc->tcp_ack = ntohl(tcph->ack_seq);
44433+ lro_desc->tcp_window = tcph->window;
44434+
44435+ lro_desc->pkt_aggr_cnt = 1;
44436+ lro_desc->ip_tot_len = ntohs(iph->tot_len);
44437+
44438+ if (tcph->doff == 8) {
44439+ ptr = (u32 *)(tcph+1);
44440+ lro_desc->tcp_saw_tstamp = 1;
44441+ lro_desc->tcp_rcv_tsval = *(ptr+1);
44442+ lro_desc->tcp_rcv_tsecr = *(ptr+2);
44443+ }
44444+
44445+ lro_desc->mss = tcp_data_len;
44446+ lro_desc->vgrp = vgrp;
44447+ lro_desc->vlan_tag = vlan_tag;
44448+ lro_desc->active = 1;
44449+
44450+ if (tcp_data_len)
44451+ lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
44452+ tcp_data_len);
44453+
44454+ if (!tcp_data_len)
44455+ lro_desc->ack_cnt++;
44456+}
44457+
44458+static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
44459+{
44460+ memset(lro_desc, 0, sizeof(struct net_lro_desc));
44461+}
44462+
44463+static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
44464+ struct tcphdr *tcph, int tcp_data_len)
44465+{
44466+ struct sk_buff *parent = lro_desc->parent;
44467+ u32 *topt;
44468+
44469+ lro_desc->pkt_aggr_cnt++;
44470+ lro_desc->ip_tot_len += tcp_data_len;
44471+ lro_desc->tcp_next_seq += tcp_data_len;
44472+ lro_desc->tcp_window = tcph->window;
44473+ lro_desc->tcp_ack = tcph->ack_seq;
44474+
44475+ /* don't update tcp_rcv_tsval, would not work with PAWS */
44476+ if (lro_desc->tcp_saw_tstamp) {
44477+ topt = (u32 *) (tcph + 1);
44478+ lro_desc->tcp_rcv_tsecr = *(topt + 2);
44479+ }
44480+
44481+ if (tcp_data_len)
44482+ lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
44483+ lro_tcp_data_csum(iph, tcph,
44484+ tcp_data_len),
44485+ parent->len);
44486+
44487+ parent->len += tcp_data_len;
44488+ parent->data_len += tcp_data_len;
44489+ if (tcp_data_len > lro_desc->mss)
44490+ lro_desc->mss = tcp_data_len;
44491+}
44492+
44493+static void lro_add_frags(struct net_lro_desc *lro_desc,
44494+ int len, int hlen, int truesize,
44495+ struct skb_frag_struct *skb_frags,
44496+ struct iphdr *iph, struct tcphdr *tcph)
44497+{
44498+ struct sk_buff *skb = lro_desc->parent;
44499+ int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44500+
44501+ lro_add_common(lro_desc, iph, tcph, tcp_data_len);
44502+
44503+ skb->truesize += truesize;
44504+
44505+ if (!tcp_data_len) {
44506+ put_page(skb_frags[0].page);
44507+ lro_desc->ack_cnt++;
44508+ return;
44509+ }
44510+
44511+ skb_frags[0].page_offset += hlen;
44512+ skb_frags[0].size -= hlen;
44513+
44514+ while (tcp_data_len > 0) {
44515+ *(lro_desc->next_frag) = *skb_frags;
44516+ tcp_data_len -= skb_frags->size;
44517+ lro_desc->next_frag++;
44518+ skb_frags++;
44519+ skb_shinfo(skb)->nr_frags++;
44520+ }
44521+}
44522+
44523+static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
44524+ struct iphdr *iph,
44525+ struct tcphdr *tcph)
44526+{
44527+ if ((lro_desc->iph->saddr != iph->saddr)
44528+ || (lro_desc->iph->daddr != iph->daddr)
44529+ || (lro_desc->tcph->source != tcph->source)
44530+ || (lro_desc->tcph->dest != tcph->dest))
44531+ return -1;
44532+ return 0;
44533+}
44534+
44535+static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
44536+ struct net_lro_desc *lro_arr,
44537+ struct iphdr *iph,
44538+ struct tcphdr *tcph)
44539+{
44540+ struct net_lro_desc *lro_desc = NULL;
44541+ struct net_lro_desc *tmp;
44542+ int max_desc = lro_mgr->max_desc;
44543+ int i;
44544+
44545+ for (i = 0; i < max_desc; i++) {
44546+ tmp = &lro_arr[i];
44547+ if (tmp->active)
44548+ if (!lro_check_tcp_conn(tmp, iph, tcph)) {
44549+ lro_desc = tmp;
44550+ goto out;
44551+ }
44552+ }
44553+
44554+ for (i = 0; i < max_desc; i++) {
44555+ if (!lro_arr[i].active) {
44556+ lro_desc = &lro_arr[i];
44557+ goto out;
44558+ }
44559+ }
44560+
44561+ LRO_INC_STATS(lro_mgr, no_desc);
44562+out:
44563+ return lro_desc;
44564+}
44565+
44566+static void lro_flush(struct net_lro_mgr *lro_mgr,
44567+ struct net_lro_desc *lro_desc)
44568+{
44569+ struct be_adapter *adapter = netdev_priv(lro_mgr->dev);
44570+
44571+ if (lro_desc->pkt_aggr_cnt > 1)
44572+ lro_update_tcp_ip_header(lro_desc);
44573+
44574+ skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
44575+
44576+ if (lro_desc->vgrp) {
44577+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44578+ vlan_hwaccel_receive_skb(lro_desc->parent,
44579+ lro_desc->vgrp,
44580+ lro_desc->vlan_tag);
44581+ else
44582+ vlan_hwaccel_rx(lro_desc->parent,
44583+ lro_desc->vgrp,
44584+ lro_desc->vlan_tag);
44585+
44586+ } else {
44587+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44588+ netif_receive_skb(lro_desc->parent);
44589+ else
44590+ netif_rx(lro_desc->parent);
44591+ }
44592+
44593+ LRO_INC_STATS(lro_mgr, flushed);
44594+ lro_clear_desc(lro_desc);
44595+}
44596+
44597+static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
44598+ struct skb_frag_struct *frags,
44599+ int len, int true_size,
44600+ void *mac_hdr,
44601+ int hlen, __wsum sum,
44602+ u32 ip_summed)
44603+{
44604+ struct sk_buff *skb;
44605+ struct skb_frag_struct *skb_frags;
44606+ int data_len = len;
44607+ int hdr_len = min(len, hlen);
44608+
44609+ skb = netdev_alloc_skb(lro_mgr->dev, hlen);
44610+ if (!skb)
44611+ return NULL;
44612+
44613+ skb->len = len;
44614+ skb->data_len = len - hdr_len;
44615+ skb->truesize += true_size;
44616+ skb->tail += hdr_len;
44617+
44618+ memcpy(skb->data, mac_hdr, hdr_len);
44619+
44620+ if (skb->data_len) {
44621+ skb_frags = skb_shinfo(skb)->frags;
44622+ while (data_len > 0) {
44623+ *skb_frags = *frags;
44624+ data_len -= frags->size;
44625+ skb_frags++;
44626+ frags++;
44627+ skb_shinfo(skb)->nr_frags++;
44628+ }
44629+ skb_shinfo(skb)->frags[0].page_offset += hdr_len;
44630+ skb_shinfo(skb)->frags[0].size -= hdr_len;
44631+ } else {
44632+ put_page(frags[0].page);
44633+ }
44634+
44635+
44636+ skb->ip_summed = ip_summed;
44637+ skb->csum = sum;
44638+ skb->protocol = eth_type_trans(skb, lro_mgr->dev);
44639+ return skb;
44640+}
44641+
44642+static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
44643+ struct skb_frag_struct *frags,
44644+ int len, int true_size,
44645+ struct vlan_group *vgrp,
44646+ u16 vlan_tag, void *priv, __wsum sum)
44647+{
44648+ struct net_lro_desc *lro_desc;
44649+ struct iphdr *iph;
44650+ struct tcphdr *tcph;
44651+ struct sk_buff *skb;
44652+ u64 flags;
44653+ void *mac_hdr;
44654+ int mac_hdr_len;
44655+ int hdr_len = LRO_MAX_PG_HLEN;
44656+ int vlan_hdr_len = 0;
44657+ u8 pad_bytes;
44658+
44659+ if (!lro_mgr->get_frag_header
44660+ || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
44661+ (void *)&tcph, &flags, priv)) {
44662+ mac_hdr = page_address(frags->page) + frags->page_offset;
44663+ goto out1;
44664+ }
44665+
44666+ if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
44667+ goto out1;
44668+
44669+ hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
44670+ mac_hdr_len = (int)((void *)(iph) - mac_hdr);
44671+
44672+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
44673+ if (!lro_desc)
44674+ goto out1;
44675+
44676+ pad_bytes = len - (ntohs(iph->tot_len) + mac_hdr_len);
44677+ if (!TCP_PAYLOAD_LENGTH(iph, tcph) && pad_bytes) {
44678+ len -= pad_bytes; /* trim the packet */
44679+ frags[0].size -= pad_bytes;
44680+ true_size -= pad_bytes;
44681+ }
44682+
44683+ if (!lro_desc->active) { /* start new lro session */
44684+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
44685+ goto out1;
44686+
44687+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44688+ hdr_len, 0, lro_mgr->ip_summed_aggr);
44689+ if (!skb)
44690+ goto out;
44691+
44692+ if ((skb->protocol == htons(ETH_P_8021Q))
44693+ && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
44694+ vlan_hdr_len = VLAN_HLEN;
44695+
44696+ iph = (void *)(skb->data + vlan_hdr_len);
44697+ tcph = (void *)((u8 *)skb->data + vlan_hdr_len
44698+ + IP_HDR_LEN(iph));
44699+
44700+ lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
44701+ LRO_INC_STATS(lro_mgr, aggregated);
44702+ return 0;
44703+ }
44704+
44705+ if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
44706+ goto out2;
44707+
44708+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
44709+ goto out2;
44710+
44711+ lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
44712+ LRO_INC_STATS(lro_mgr, aggregated);
44713+
44714+ if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
44715+ lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
44716+ lro_flush(lro_mgr, lro_desc);
44717+
44718+ return NULL;
44719+
44720+out2: /* send aggregated packets to the stack */
44721+ lro_flush(lro_mgr, lro_desc);
44722+
44723+out1: /* Original packet has to be posted to the stack */
44724+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44725+ hdr_len, sum, lro_mgr->ip_summed);
44726+out:
44727+ return skb;
44728+}
44729+
44730+void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44731+ struct skb_frag_struct *frags,
44732+ int len, int true_size, void *priv, __wsum sum)
44733+{
44734+ struct sk_buff *skb;
44735+
44736+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
44737+ priv, sum);
44738+ if (!skb)
44739+ return;
44740+
44741+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44742+ netif_receive_skb(skb);
44743+ else
44744+ netif_rx(skb);
44745+}
44746+
44747+void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44748+ struct skb_frag_struct *frags,
44749+ int len, int true_size,
44750+ struct vlan_group *vgrp,
44751+ u16 vlan_tag, void *priv, __wsum sum)
44752+{
44753+ struct sk_buff *skb;
44754+
44755+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
44756+ vlan_tag, priv, sum);
44757+ if (!skb)
44758+ return;
44759+
44760+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44761+ vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
44762+ else
44763+ vlan_hwaccel_rx(skb, vgrp, vlan_tag);
44764+}
44765+
44766+void lro_flush_all_compat(struct net_lro_mgr *lro_mgr)
44767+{
44768+ int i;
44769+ struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
44770+
44771+ for (i = 0; i < lro_mgr->max_desc; i++) {
44772+ if (lro_desc[i].active)
44773+ lro_flush(lro_mgr, &lro_desc[i]);
44774+ }
44775+}
44776+#endif /* INET_LRO backport */
44777+
44778+#ifndef TX_MQ
44779+struct net_device *alloc_etherdev_mq_compat(int sizeof_priv,
44780+ unsigned int queue_count)
44781+{
44782+ return alloc_etherdev(sizeof_priv);
44783+}
44784+
44785+void netif_wake_subqueue_compat(struct net_device *dev, u16 queue_index)
44786+{
44787+ netif_wake_queue(dev);
44788+}
44789+
44790+void netif_stop_subqueue_compat(struct net_device *dev, u16 queue_index)
44791+{
44792+ netif_stop_queue(dev);
44793+}
44794+
44795+int __netif_subqueue_stopped_compat(const struct net_device *dev,
44796+ u16 queue_index)
44797+{
44798+ return netif_queue_stopped(dev);
44799+}
44800+
44801+u16 skb_get_queue_mapping_compat(const struct sk_buff *skb)
44802+{
44803+ return 0;
44804+}
44805+
44806+void netif_set_real_num_tx_queues_compat(struct net_device *dev,
44807+ unsigned int txq)
44808+{
44809+ return;
44810+}
44811+
44812+u16 skb_tx_hash_compat(const struct net_device *dev,
44813+ const struct sk_buff *skb)
44814+{
44815+ return 0;
44816+}
44817+#endif
44818diff --git a/drivers/net/benet/be_compat.h b/drivers/net/benet/be_compat.h
44819new file mode 100644
44820index 0000000..8ceecc8
44821--- /dev/null
44822+++ b/drivers/net/benet/be_compat.h
44823@@ -0,0 +1,621 @@
44824+/*
44825+ * Copyright (C) 2005 - 2011 Emulex
44826+ * All rights reserved.
44827+ *
44828+ * This program is free software; you can redistribute it and/or
44829+ * modify it under the terms of the GNU General Public License version 2
44830+ * as published by the Free Software Foundation. The full GNU General
44831+ * Public License is included in this distribution in the file called COPYING.
44832+ *
44833+ * Contact Information:
44834+ * linux-drivers@emulex.com
44835+ *
44836+ * Emulex
44837+ * 3333 Susan Street
44838+ * Costa Mesa, CA 92626
44839+ */
44840+
44841+#ifndef BE_COMPAT_H
44842+#define BE_COMPAT_H
44843+
44844+/****************** RHEL5 and SLES10 backport ***************************/
44845+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
44846+
44847+#ifndef upper_32_bits
44848+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
44849+#endif
44850+
44851+#ifndef CHECKSUM_PARTIAL
44852+#define CHECKSUM_PARTIAL CHECKSUM_HW
44853+#define CHECKSUM_COMPLETE CHECKSUM_HW
44854+#endif
44855+
44856+#if !defined(ip_hdr)
44857+#define ip_hdr(skb) (skb->nh.iph)
44858+#define ipv6_hdr(skb) (skb->nh.ipv6h)
44859+#endif
44860+
44861+#if !defined(__packed)
44862+#define __packed __attribute__ ((packed))
44863+#endif
44864+
44865+#if !defined(RHEL_MINOR)
44866+/* Only for RH5U1 (Maui) and SLES10 NIC driver */
44867+enum {
44868+ false = 0,
44869+ true = 1
44870+};
44871+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
44872+/* Only for RH5U1 (Maui) NIC driver */
44873+static inline __attribute__((const))
44874+int __ilog2_u32(u32 n)
44875+{
44876+ return fls(n) - 1;
44877+}
44878+#endif
44879+#endif
44880+
44881+#define ETH_FCS_LEN 4
44882+#define bool u8
44883+#ifndef PTR_ALIGN
44884+#define PTR_ALIGN(p, a) ((typeof(p)) \
44885+ ALIGN((unsigned long)(p), (a)))
44886+#endif
44887+#define list_first_entry(ptr, type, member) \
44888+ list_entry((ptr)->next, type, member)
44889+
44890+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44891+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44892+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] \
44893+ __devinitdata
44894+#endif
44895+
44896+/* Backport of request_irq */
44897+typedef irqreturn_t(*backport_irq_handler_t) (int, void *);
44898+static inline int
44899+backport_request_irq(unsigned int irq, irqreturn_t(*handler) (int, void *),
44900+ unsigned long flags, const char *dev_name, void *dev_id)
44901+{
44902+ return request_irq(irq,
44903+ (irqreturn_t(*) (int, void *, struct pt_regs *))handler,
44904+ flags, dev_name, dev_id);
44905+}
44906+#define request_irq backport_request_irq
44907+
44908+#endif /*** RHEL5 and SLES10 backport ***/
44909+
44910+#if !defined(__packed)
44911+#define __packed __attribute__ ((packed))
44912+#endif
44913+
44914+/****************** SLES10 only backport ***************************/
44915+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
44916+
44917+#include <linux/tifm.h>
44918+
44919+#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
44920+#define IRQF_SHARED SA_SHIRQ
44921+#define CHECKSUM_PARTIAL CHECKSUM_HW
44922+#define CHECKSUM_COMPLETE CHECKSUM_HW
44923+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
44924+#define NETIF_F_IPV6_CSUM NETIF_F_IP_CSUM
44925+#define NETIF_F_TSO6 NETIF_F_TSO
44926+
44927+
44928+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
44929+ unsigned int length)
44930+{
44931+ /* 16 == NET_PAD_SKB */
44932+ struct sk_buff *skb;
44933+ skb = alloc_skb(length + 16, GFP_ATOMIC);
44934+ if (likely(skb != NULL)) {
44935+ skb_reserve(skb, 16);
44936+ skb->dev = dev;
44937+ }
44938+ return skb;
44939+}
44940+
44941+#define PCI_SAVE_STATE(x)
44942+
44943+#else /* SLES10 only backport */
44944+
44945+#define PCI_SAVE_STATE(x) pci_save_state(x)
44946+
44947+#endif /* SLES10 only backport */
44948+
44949+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
44950+#define netdev_tx_t int
44951+#endif
44952+
44953+#ifndef VLAN_PRIO_MASK
44954+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
44955+#define VLAN_PRIO_SHIFT 13
44956+#endif
44957+
44958+/*
44959+ * Backport of netdev ops struct
44960+ */
44961+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44962+struct net_device_ops {
44963+ int (*ndo_init)(struct net_device *dev);
44964+ void (*ndo_uninit)(struct net_device *dev);
44965+ int (*ndo_open)(struct net_device *dev);
44966+ int (*ndo_stop)(struct net_device *dev);
44967+ int (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev);
44968+ u16 (*ndo_select_queue)(struct net_device *dev,
44969+ struct sk_buff *skb);
44970+ void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
44971+ void (*ndo_set_rx_mode)(struct net_device *dev);
44972+ void (*ndo_set_multicast_list)(struct net_device *dev);
44973+ int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
44974+ int (*ndo_validate_addr)(struct net_device *dev);
44975+ int (*ndo_do_ioctl)(struct net_device *dev,
44976+ struct ifreq *ifr, int cmd);
44977+ int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
44978+ int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
44979+ int (*ndo_neigh_setup)(struct net_device *dev,
44980+ struct neigh_parms *);
44981+ void (*ndo_tx_timeout) (struct net_device *dev);
44982+
44983+ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
44984+
44985+ void (*ndo_vlan_rx_register)(struct net_device *dev,
44986+ struct vlan_group *grp);
44987+ void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
44988+ unsigned short vid);
44989+ void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
44990+ unsigned short vid);
44991+#ifdef CONFIG_NET_POLL_CONTROLLER
44992+#define HAVE_NETDEV_POLL
44993+ void (*ndo_poll_controller)(struct net_device *dev);
44994+#endif
44995+};
44996+extern void be_netdev_ops_init(struct net_device *netdev,
44997+ struct net_device_ops *ops);
44998+extern int eth_validate_addr(struct net_device *);
44999+
45000+#endif /* Netdev ops backport */
45001+
45002+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 29)
45003+#undef NETIF_F_GRO
45004+#endif
45005+
45006+#ifdef NO_GRO
45007+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
45008+#undef NETIF_F_GRO
45009+#endif
45010+#endif
45011+
45012+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45013+#define HAVE_ETHTOOL_FLASH
45014+#endif
45015+
45016+/*
45017+ * Backport of NAPI
45018+ */
45019+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
45020+
45021+#if defined(RHEL_MINOR) && (RHEL_MINOR > 3)
45022+#define RHEL_NEW_NAPI
45023+#endif
45024+
45025+/* We need a new struct that has some meta data beyond rhel 5.4's napi_struct
45026+ * to fix rhel5.4's half-baked new napi implementation.
45027+ * We don't want to use rhel 5.4's broken napi_complete; so
45028+ * define a new be_napi_complete that executes the logic only for Rx
45029+ */
45030+
45031+#ifdef RHEL_NEW_NAPI
45032+#define napi_complete be_napi_complete
45033+typedef struct napi_struct rhel_napi_struct;
45034+#endif
45035+#define napi_struct be_napi_struct
45036+#define napi_gro_frags(napi) napi_gro_frags((rhel_napi_struct *) napi)
45037+#define vlan_gro_frags(napi, vlan_grp, vid)\
45038+ vlan_gro_frags((rhel_napi_struct *) napi, vlan_grp, vid)
45039+#define napi_get_frags(napi) napi_get_frags((rhel_napi_struct *) napi)
45040+
45041+struct napi_struct {
45042+#ifdef RHEL_NEW_NAPI
45043+ rhel_napi_struct napi; /* must be the first member */
45044+#endif
45045+ struct net_device *dev;
45046+ int (*poll) (struct napi_struct *napi, int budget);
45047+ bool rx;
45048+};
45049+
45050+static inline void napi_complete(struct napi_struct *napi)
45051+{
45052+#ifdef NETIF_F_GRO
45053+ napi_gro_flush((rhel_napi_struct *)napi);
45054+#endif
45055+ netif_rx_complete(napi->dev);
45056+}
45057+
45058+static inline void napi_schedule(struct napi_struct *napi)
45059+{
45060+ netif_rx_schedule(napi->dev);
45061+}
45062+
45063+static inline void napi_enable(struct napi_struct *napi)
45064+{
45065+ netif_poll_enable(napi->dev);
45066+}
45067+
45068+static inline void napi_disable(struct napi_struct *napi)
45069+{
45070+ netif_poll_disable(napi->dev);
45071+}
45072+
45073+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45074+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45075+static inline void vlan_group_set_device(struct vlan_group *vg,
45076+ u16 vlan_id,
45077+ struct net_device *dev)
45078+{
45079+ struct net_device **array;
45080+ if (!vg)
45081+ return;
45082+ array = vg->vlan_devices;
45083+ array[vlan_id] = dev;
45084+}
45085+#endif
45086+
45087+#endif /* New NAPI backport */
45088+
45089+extern int be_netif_napi_add(struct net_device *netdev,
45090+ struct napi_struct *napi,
45091+ int (*poll) (struct napi_struct *, int), int weight);
45092+extern void be_netif_napi_del(struct net_device *netdev);
45093+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45094+#define HAVE_SIMULATED_MULTI_NAPI
45095+#endif
45096+
45097+/************** Backport of Delayed work queues interface ****************/
45098+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
45099+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45100+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45101+struct delayed_work {
45102+ struct work_struct work;
45103+};
45104+#endif
45105+
45106+#define INIT_DELAYED_WORK(_work, _func) \
45107+ INIT_WORK(&(_work)->work, _func, &(_work)->work)
45108+
45109+static inline int backport_cancel_delayed_work_sync(struct delayed_work *work)
45110+{
45111+ cancel_rearming_delayed_work(&work->work);
45112+ return 0;
45113+}
45114+#define cancel_delayed_work_sync backport_cancel_delayed_work_sync
45115+
45116+static inline int backport_schedule_delayed_work(struct delayed_work *work,
45117+ unsigned long delay)
45118+{
45119+ if (unlikely(!delay))
45120+ return schedule_work(&work->work);
45121+ else
45122+ return schedule_delayed_work(&work->work, delay);
45123+}
45124+#define schedule_delayed_work backport_schedule_delayed_work
45125+#endif /* backport delayed workqueue */
45126+
45127+
45128+/************** Backport of INET_LRO **********************************/
45129+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45130+
45131+#include <linux/inet_lro.h>
45132+
45133+#else
45134+
45135+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
45136+
45137+#if defined(RHEL_MINOR) && RHEL_MINOR < 6
45138+typedef __u16 __bitwise __sum16;
45139+typedef __u32 __bitwise __wsum;
45140+#endif
45141+
45142+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR <= 3)) || \
45143+ (!defined(RHEL_MINOR)))
45144+static inline __wsum csum_unfold(__sum16 n)
45145+{
45146+ return (__force __wsum)n;
45147+}
45148+#endif
45149+
45150+#endif
45151+
45152+#define lro_flush_all lro_flush_all_compat
45153+#define lro_vlan_hwaccel_receive_frags lro_vlan_hwaccel_receive_frags_compat
45154+#define lro_receive_frags lro_receive_frags_compat
45155+
45156+struct net_lro_stats {
45157+ unsigned long aggregated;
45158+ unsigned long flushed;
45159+ unsigned long no_desc;
45160+};
45161+
45162+struct net_lro_desc {
45163+ struct sk_buff *parent;
45164+ struct sk_buff *last_skb;
45165+ struct skb_frag_struct *next_frag;
45166+ struct iphdr *iph;
45167+ struct tcphdr *tcph;
45168+ struct vlan_group *vgrp;
45169+ __wsum data_csum;
45170+ u32 tcp_rcv_tsecr;
45171+ u32 tcp_rcv_tsval;
45172+ u32 tcp_ack;
45173+ u32 tcp_next_seq;
45174+ u32 skb_tot_frags_len;
45175+ u32 ack_cnt;
45176+ u16 ip_tot_len;
45177+ u16 tcp_saw_tstamp; /* timestamps enabled */
45178+ u16 tcp_window;
45179+ u16 vlan_tag;
45180+ int pkt_aggr_cnt; /* counts aggregated packets */
45181+ int vlan_packet;
45182+ int mss;
45183+ int active;
45184+};
45185+
45186+struct net_lro_mgr {
45187+ struct net_device *dev;
45188+ struct net_lro_stats stats;
45189+
45190+ /* LRO features */
45191+ unsigned long features;
45192+#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
45193+#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
45194+ from received packets and eth protocol
45195+ is still ETH_P_8021Q */
45196+
45197+ u32 ip_summed; /* Set in non generated SKBs in page mode */
45198+ u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
45199+ * or CHECKSUM_NONE */
45200+
45201+ int max_desc; /* Max number of LRO descriptors */
45202+ int max_aggr; /* Max number of LRO packets to be aggregated */
45203+
45204+ struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
45205+
45206+ /* Optimized driver functions
45207+ * get_skb_header: returns tcp and ip header for packet in SKB
45208+ */
45209+ int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
45210+ void **tcpudp_hdr, u64 *hdr_flags, void *priv);
45211+
45212+ /* hdr_flags: */
45213+#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
45214+#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
45215+
45216+ /*
45217+ * get_frag_header: returns mac, tcp and ip header for packet in SKB
45218+ *
45219+ * @hdr_flags: Indicate what kind of LRO has to be done
45220+ * (IPv4/IPv6/TCP/UDP)
45221+ */
45222+ int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
45223+ void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
45224+ void *priv);
45225+};
45226+
45227+extern void lro_receive_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
45228+ void *priv);
45229+
45230+extern void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
45231+ struct sk_buff *skb, struct vlan_group *vgrp,
45232+ u16 vlan_tag, void *priv);
45233+
45234+/* This functions aggregate fragments and generate SKBs do pass
45235+ * the packets to the stack.
45236+ *
45237+ * @lro_mgr: LRO manager to use
45238+ * @frags: Fragment to be processed. Must contain entire header in first
45239+ * element.
45240+ * @len: Length of received data
45241+ * @true_size: Actual size of memory the fragment is consuming
45242+ * @priv: Private data that may be used by driver functions
45243+ * (for example get_tcp_ip_hdr)
45244+ */
45245+extern void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45246+ struct skb_frag_struct *frags, int len, int true_size,
45247+ void *priv, __wsum sum);
45248+
45249+extern void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45250+ struct skb_frag_struct *frags, int len, int true_size,
45251+ struct vlan_group *vgrp, u16 vlan_tag, void *priv,
45252+ __wsum sum);
45253+
45254+/* Forward all aggregated SKBs held by lro_mgr to network stack */
45255+extern void lro_flush_all_compat(struct net_lro_mgr *lro_mgr);
45256+
45257+extern void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
45258+ struct tcphdr *tcph);
45259+#endif /* backport of inet_lro */
45260+
45261+#ifndef ETHTOOL_FLASH_MAX_FILENAME
45262+#define ETHTOOL_FLASH_MAX_FILENAME 128
45263+#endif
45264+
45265+#if defined(CONFIG_XEN) && !defined(NETIF_F_GRO)
45266+#define BE_INIT_FRAGS_PER_FRAME (u32) 1
45267+#else
45268+#define BE_INIT_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
45269+#endif
45270+
45271+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45272+#ifdef CONFIG_PCI_IOV
45273+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR == 6)))
45274+#undef CONFIG_PCI_IOV
45275+#endif
45276+#endif
45277+#endif
45278+
45279+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
45280+#define dev_to_node(dev) -1
45281+#endif
45282+
45283+
45284+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45285+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR > 6)))
45286+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
45287+ unsigned int length)
45288+{
45289+ struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
45290+
45291+ if (NET_IP_ALIGN && skb)
45292+ skb_reserve(skb, NET_IP_ALIGN);
45293+ return skb;
45294+}
45295+#endif
45296+#endif
45297+
45298+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
45299+#ifndef netif_set_gso_max_size
45300+#define netif_set_gso_max_size(netdev, size) do {} while (0)
45301+#endif
45302+#endif
45303+
45304+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
45305+#if defined(RHEL_MINOR) && (RHEL_MINOR <= 4)
45306+static inline int skb_is_gso_v6(const struct sk_buff *skb)
45307+{
45308+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
45309+}
45310+#endif
45311+#endif
45312+
45313+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45314+static inline int skb_is_gso_v6(const struct sk_buff *skb)
45315+{
45316+ return (ip_hdr(skb)->version == 6);
45317+}
45318+#endif
45319+
45320+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45321+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
45322+#endif
45323+
45324+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45325+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)))
45326+#define HAVE_SRIOV_CONFIG
45327+#endif
45328+#endif
45329+
45330+#ifndef NETIF_F_VLAN_SG
45331+#define NETIF_F_VLAN_SG NETIF_F_SG
45332+#endif
45333+
45334+#ifndef NETIF_F_VLAN_CSUM
45335+#define NETIF_F_VLAN_CSUM NETIF_F_HW_CSUM
45336+#endif
45337+
45338+#ifndef NETIF_F_VLAN_TSO
45339+#define NETIF_F_VLAN_TSO NETIF_F_TSO
45340+#endif
45341+
45342+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45343+#define vlan_features features
45344+#endif
45345+
45346+#ifndef DEFINE_DMA_UNMAP_ADDR
45347+#define DEFINE_DMA_UNMAP_ADDR(bus) dma_addr_t bus
45348+#endif
45349+
45350+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45351+
45352+#ifndef netdev_mc_count
45353+#define netdev_mc_count(nd) (nd->mc_count)
45354+#endif
45355+
45356+#ifndef netdev_hw_addr
45357+#define netdev_hw_addr dev_mc_list
45358+#endif
45359+
45360+#ifndef netdev_for_each_mc_addr
45361+#define netdev_for_each_mc_addr(ha, nd) \
45362+ for (ha = (nd)->mc_list; ha; ha = ha->next)
45363+#endif
45364+
45365+#define DMI_ADDR dmi_addr
45366+#else
45367+#define DMI_ADDR addr
45368+#endif
45369+
45370+#ifndef VLAN_GROUP_ARRAY_LEN
45371+#define VLAN_GROUP_ARRAY_LEN VLAN_N_VID
45372+#endif
45373+/**************************** Multi TXQ Support ******************************/
45374+
45375+/* Supported only in RHEL6 and SL11.1 (barring one execption) */
45376+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45377+#define MQ_TX
45378+#endif
45379+
45380+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45381+#define alloc_etherdev_mq(sz, cnt) alloc_etherdev(sz)
45382+#define skb_get_queue_mapping(skb) 0
45383+#define skb_tx_hash(dev, skb) 0
45384+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45385+#define netif_wake_subqueue(dev, idx) netif_wake_queue(dev)
45386+#define netif_stop_subqueue(dev, idx) netif_stop_queue(dev)
45387+#define __netif_subqueue_stopped(dev, idx) netif_queue_stopped(dev)
45388+#endif /* < 2.6.27 */
45389+
45390+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
45391+ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)))
45392+#define skb_tx_hash(dev, skb) 0
45393+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45394+#endif
45395+
45396+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45397+#define netif_set_real_num_tx_queues be_set_real_num_tx_queues
45398+static inline void be_set_real_num_tx_queues(struct net_device *dev,
45399+ unsigned int txq)
45400+{
45401+ dev->real_num_tx_queues = txq;
45402+}
45403+#endif
45404+
45405+#include <linux/if_vlan.h>
45406+static inline void be_reset_skb_tx_vlan(struct sk_buff *skb)
45407+{
45408+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45409+ skb->vlan_tci = 0;
45410+#else
45411+ struct vlan_skb_tx_cookie *cookie;
45412+
45413+ cookie = VLAN_TX_SKB_CB(skb);
45414+ cookie->magic = 0;
45415+#endif
45416+}
45417+
45418+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45419+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
45420+{
45421+ skb->nh.raw = skb->data + offset;
45422+}
45423+#endif
45424+
45425+static inline struct sk_buff *be_vlan_put_tag(struct sk_buff *skb,
45426+ unsigned short vlan_tag)
45427+{
45428+ struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag);
45429+ /* On kernel versions < 2.6.27 the __vlan_put_tag() function
45430+ * distorts the network layer hdr pointer in the skb which
45431+ * affects the detection of UDP/TCP packets down the line in
45432+ * wrb_fill_hdr().This work-around sets it right.
45433+ */
45434+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45435+ skb_set_network_header(new_skb, VLAN_ETH_HLEN);
45436+#endif
45437+ return new_skb;
45438+}
45439+
45440+#ifndef ACCESS_ONCE
45441+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
45442+#endif
45443+
45444+#endif /* BE_COMPAT_H */
45445diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
45446index f0fd95b..37bad99 100644
45447--- a/drivers/net/benet/be_ethtool.c
45448+++ b/drivers/net/benet/be_ethtool.c
45449@@ -1,18 +1,18 @@
45450 /*
45451- * Copyright (C) 2005 - 2009 ServerEngines
45452+ * Copyright (C) 2005 - 2011 Emulex
45453 * All rights reserved.
45454 *
45455 * This program is free software; you can redistribute it and/or
45456 * modify it under the terms of the GNU General Public License version 2
45457- * as published by the Free Software Foundation. The full GNU General
45458+ * as published by the Free Software Foundation. The full GNU General
45459 * Public License is included in this distribution in the file called COPYING.
45460 *
45461 * Contact Information:
45462- * linux-drivers@serverengines.com
45463+ * linux-drivers@emulex.com
45464 *
45465- * ServerEngines
45466- * 209 N. Fair Oaks Ave
45467- * Sunnyvale, CA 94085
45468+ * Emulex
45469+ * 3333 Susan Street
45470+ * Costa Mesa, CA 92626
45471 */
45472
45473 #include "be.h"
45474@@ -26,21 +26,19 @@ struct be_ethtool_stat {
45475 int offset;
45476 };
45477
45478-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
45479+enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
45480 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
45481 offsetof(_struct, field)
45482-#define NETSTAT_INFO(field) #field, NETSTAT,\
45483+#define NETSTAT_INFO(field) #field, NETSTAT,\
45484 FIELDINFO(struct net_device_stats,\
45485 field)
45486-#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45487- FIELDINFO(struct be_drvr_stats, field)
45488-#define MISCSTAT_INFO(field) #field, MISCSTAT,\
45489- FIELDINFO(struct be_rxf_stats, field)
45490-#define PORTSTAT_INFO(field) #field, PORTSTAT,\
45491- FIELDINFO(struct be_port_rxf_stats, \
45492+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
45493+ FIELDINFO(struct be_tx_stats, field)
45494+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
45495+ FIELDINFO(struct be_rx_stats, field)
45496+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45497+ FIELDINFO(struct be_drv_stats, \
45498 field)
45499-#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45500- FIELDINFO(struct be_erx_stats, field)
45501
45502 static const struct be_ethtool_stat et_stats[] = {
45503 {NETSTAT_INFO(rx_packets)},
45504@@ -51,70 +49,131 @@ static const struct be_ethtool_stat et_stats[] = {
45505 {NETSTAT_INFO(tx_errors)},
45506 {NETSTAT_INFO(rx_dropped)},
45507 {NETSTAT_INFO(tx_dropped)},
45508- {DRVSTAT_INFO(be_tx_reqs)},
45509- {DRVSTAT_INFO(be_tx_stops)},
45510- {DRVSTAT_INFO(be_fwd_reqs)},
45511- {DRVSTAT_INFO(be_tx_wrbs)},
45512- {DRVSTAT_INFO(be_polls)},
45513 {DRVSTAT_INFO(be_tx_events)},
45514- {DRVSTAT_INFO(be_rx_events)},
45515- {DRVSTAT_INFO(be_tx_compl)},
45516- {DRVSTAT_INFO(be_rx_compl)},
45517- {DRVSTAT_INFO(be_ethrx_post_fail)},
45518- {DRVSTAT_INFO(be_802_3_dropped_frames)},
45519- {DRVSTAT_INFO(be_802_3_malformed_frames)},
45520- {DRVSTAT_INFO(be_tx_rate)},
45521- {DRVSTAT_INFO(be_rx_rate)},
45522- {PORTSTAT_INFO(rx_unicast_frames)},
45523- {PORTSTAT_INFO(rx_multicast_frames)},
45524- {PORTSTAT_INFO(rx_broadcast_frames)},
45525- {PORTSTAT_INFO(rx_crc_errors)},
45526- {PORTSTAT_INFO(rx_alignment_symbol_errors)},
45527- {PORTSTAT_INFO(rx_pause_frames)},
45528- {PORTSTAT_INFO(rx_control_frames)},
45529- {PORTSTAT_INFO(rx_in_range_errors)},
45530- {PORTSTAT_INFO(rx_out_range_errors)},
45531- {PORTSTAT_INFO(rx_frame_too_long)},
45532- {PORTSTAT_INFO(rx_address_match_errors)},
45533- {PORTSTAT_INFO(rx_vlan_mismatch)},
45534- {PORTSTAT_INFO(rx_dropped_too_small)},
45535- {PORTSTAT_INFO(rx_dropped_too_short)},
45536- {PORTSTAT_INFO(rx_dropped_header_too_small)},
45537- {PORTSTAT_INFO(rx_dropped_tcp_length)},
45538- {PORTSTAT_INFO(rx_dropped_runt)},
45539- {PORTSTAT_INFO(rx_fifo_overflow)},
45540- {PORTSTAT_INFO(rx_input_fifo_overflow)},
45541- {PORTSTAT_INFO(rx_ip_checksum_errs)},
45542- {PORTSTAT_INFO(rx_tcp_checksum_errs)},
45543- {PORTSTAT_INFO(rx_udp_checksum_errs)},
45544- {PORTSTAT_INFO(rx_non_rss_packets)},
45545- {PORTSTAT_INFO(rx_ipv4_packets)},
45546- {PORTSTAT_INFO(rx_ipv6_packets)},
45547- {PORTSTAT_INFO(tx_unicastframes)},
45548- {PORTSTAT_INFO(tx_multicastframes)},
45549- {PORTSTAT_INFO(tx_broadcastframes)},
45550- {PORTSTAT_INFO(tx_pauseframes)},
45551- {PORTSTAT_INFO(tx_controlframes)},
45552- {MISCSTAT_INFO(rx_drops_no_pbuf)},
45553- {MISCSTAT_INFO(rx_drops_no_txpb)},
45554- {MISCSTAT_INFO(rx_drops_no_erx_descr)},
45555- {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
45556- {MISCSTAT_INFO(rx_drops_too_many_frags)},
45557- {MISCSTAT_INFO(rx_drops_invalid_ring)},
45558- {MISCSTAT_INFO(forwarded_packets)},
45559- {MISCSTAT_INFO(rx_drops_mtu)},
45560- {ERXSTAT_INFO(rx_drops_no_fragments)},
45561+ {DRVSTAT_INFO(rx_crc_errors)},
45562+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
45563+ {DRVSTAT_INFO(rx_pause_frames)},
45564+ {DRVSTAT_INFO(rx_control_frames)},
45565+ {DRVSTAT_INFO(rx_in_range_errors)},
45566+ {DRVSTAT_INFO(rx_out_range_errors)},
45567+ {DRVSTAT_INFO(rx_frame_too_long)},
45568+ {DRVSTAT_INFO(rx_address_match_errors)},
45569+ {DRVSTAT_INFO(rx_dropped_too_small)},
45570+ {DRVSTAT_INFO(rx_dropped_too_short)},
45571+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
45572+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
45573+ {DRVSTAT_INFO(rx_dropped_runt)},
45574+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
45575+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
45576+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
45577+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
45578+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
45579+ {DRVSTAT_INFO(rx_switched_unicast_packets)},
45580+ {DRVSTAT_INFO(rx_switched_multicast_packets)},
45581+ {DRVSTAT_INFO(rx_switched_broadcast_packets)},
45582+ {DRVSTAT_INFO(tx_pauseframes)},
45583+ {DRVSTAT_INFO(tx_controlframes)},
45584+ {DRVSTAT_INFO(rx_priority_pause_frames)},
45585+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
45586+ {DRVSTAT_INFO(jabber_events)},
45587+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
45588+ {DRVSTAT_INFO(rx_drops_no_txpb)},
45589+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
45590+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
45591+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
45592+ {DRVSTAT_INFO(rx_drops_invalid_ring)},
45593+ {DRVSTAT_INFO(forwarded_packets)},
45594+ {DRVSTAT_INFO(rx_drops_mtu)},
45595+ {DRVSTAT_INFO(eth_red_drops)},
45596+ {DRVSTAT_INFO(be_on_die_temperature)}
45597 };
45598 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
45599
45600+/* Stats related to multi RX queues */
45601+static const struct be_ethtool_stat et_rx_stats[] = {
45602+ {DRVSTAT_RX_INFO(rx_bytes)},
45603+ {DRVSTAT_RX_INFO(rx_pkts)},
45604+ {DRVSTAT_RX_INFO(rx_rate)},
45605+ {DRVSTAT_RX_INFO(rx_polls)},
45606+ {DRVSTAT_RX_INFO(rx_events)},
45607+ {DRVSTAT_RX_INFO(rx_compl)},
45608+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
45609+ {DRVSTAT_RX_INFO(rx_post_fail)},
45610+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
45611+};
45612+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
45613+
45614+/* Stats related to multi TX queues */
45615+static const struct be_ethtool_stat et_tx_stats[] = {
45616+ {DRVSTAT_TX_INFO(be_tx_rate)},
45617+ {DRVSTAT_TX_INFO(be_tx_reqs)},
45618+ {DRVSTAT_TX_INFO(be_tx_wrbs)},
45619+ {DRVSTAT_TX_INFO(be_tx_stops)},
45620+ {DRVSTAT_TX_INFO(be_tx_compl)},
45621+ {DRVSTAT_TX_INFO(be_ipv6_ext_hdr_tx_drop)}
45622+};
45623+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
45624+
45625+static const char et_self_tests[][ETH_GSTRING_LEN] = {
45626+ "MAC Loopback test",
45627+ "PHY Loopback test",
45628+ "External Loopback test",
45629+ "DDR DMA test",
45630+ "Link test"
45631+};
45632+
45633+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
45634+#define BE_MAC_LOOPBACK 0x0
45635+#define BE_PHY_LOOPBACK 0x1
45636+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
45637+#define BE_NO_LOOPBACK 0xff
45638+
45639+/* MAC speed valid values */
45640+#define SPEED_DEFAULT 0x0
45641+#define SPEED_FORCED_10GB 0x1
45642+#define SPEED_FORCED_1GB 0x2
45643+#define SPEED_AUTONEG_10GB 0x3
45644+#define SPEED_AUTONEG_1GB 0x4
45645+#define SPEED_AUTONEG_100MB 0x5
45646+#define SPEED_AUTONEG_10GB_1GB 0x6
45647+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
45648+#define SPEED_AUTONEG_1GB_100MB 0x8
45649+#define SPEED_AUTONEG_10MB 0x9
45650+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
45651+#define SPEED_AUTONEG_100MB_10MB 0xb
45652+#define SPEED_FORCED_100MB 0xc
45653+#define SPEED_FORCED_10MB 0xd
45654+
45655+
45656+
45657 static void
45658 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45659 {
45660 struct be_adapter *adapter = netdev_priv(netdev);
45661+ int len;
45662+ char fw_on_flash[FW_VER_LEN];
45663+
45664+ memset(fw_on_flash, 0 , sizeof(fw_on_flash));
45665+
45666+ be_cmd_get_fw_ver(adapter, adapter->fw_ver,
45667+ fw_on_flash);
45668
45669 strcpy(drvinfo->driver, DRV_NAME);
45670 strcpy(drvinfo->version, DRV_VER);
45671+
45672 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
45673+ if (memcmp(adapter->fw_ver, fw_on_flash,
45674+ FW_VER_LEN) != 0) {
45675+ len = strlen(drvinfo->fw_version);
45676+ strncpy(drvinfo->fw_version+len, " [",
45677+ FW_VER_LEN-len-1);
45678+ len = strlen(drvinfo->fw_version);
45679+ strncpy(drvinfo->fw_version+len, fw_on_flash,
45680+ FW_VER_LEN-len-1);
45681+ len = strlen(drvinfo->fw_version);
45682+ strncpy(drvinfo->fw_version+len, "]", FW_VER_LEN-len-1);
45683+ }
45684+
45685 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
45686 drvinfo->testinfo_len = 0;
45687 drvinfo->regdump_len = 0;
45688@@ -122,12 +181,37 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45689 }
45690
45691 static int
45692+be_get_reg_len(struct net_device *netdev)
45693+{
45694+ struct be_adapter *adapter = netdev_priv(netdev);
45695+ u32 log_size = 0;
45696+
45697+ if (be_physfn(adapter))
45698+ be_cmd_get_reg_len(adapter, &log_size);
45699+
45700+ return log_size;
45701+}
45702+
45703+static void
45704+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
45705+{
45706+ struct be_adapter *adapter = netdev_priv(netdev);
45707+
45708+ if (be_physfn(adapter)) {
45709+ memset(buf, 0, regs->len);
45710+ be_cmd_get_regs(adapter, regs->len, buf);
45711+ }
45712+}
45713+
45714+static int
45715 be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45716 {
45717 struct be_adapter *adapter = netdev_priv(netdev);
45718- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45719+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
45720 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45721
45722+ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
45723+
45724 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
45725 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
45726 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
45727@@ -149,25 +233,52 @@ static int
45728 be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45729 {
45730 struct be_adapter *adapter = netdev_priv(netdev);
45731- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45732+ struct be_rx_obj *rxo;
45733+ struct be_eq_obj *rx_eq;
45734 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45735 u32 tx_max, tx_min, tx_cur;
45736 u32 rx_max, rx_min, rx_cur;
45737- int status = 0;
45738+ int status = 0, i;
45739
45740 if (coalesce->use_adaptive_tx_coalesce == 1)
45741 return -EINVAL;
45742+ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
45743+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
45744+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
45745
45746- /* if AIC is being turned on now, start with an EQD of 0 */
45747- if (rx_eq->enable_aic == 0 &&
45748- coalesce->use_adaptive_rx_coalesce == 1) {
45749- rx_eq->cur_eqd = 0;
45750+ for_all_rx_queues(adapter, rxo, i) {
45751+ rx_eq = &rxo->rx_eq;
45752+
45753+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
45754+ rx_eq->cur_eqd = 0;
45755+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45756+
45757+ rx_max = coalesce->rx_coalesce_usecs_high;
45758+ rx_min = coalesce->rx_coalesce_usecs_low;
45759+ rx_cur = coalesce->rx_coalesce_usecs;
45760+
45761+ if (rx_eq->enable_aic) {
45762+ if (rx_max > BE_MAX_EQD)
45763+ rx_max = BE_MAX_EQD;
45764+ if (rx_min > rx_max)
45765+ rx_min = rx_max;
45766+ rx_eq->max_eqd = rx_max;
45767+ rx_eq->min_eqd = rx_min;
45768+ if (rx_eq->cur_eqd > rx_max)
45769+ rx_eq->cur_eqd = rx_max;
45770+ if (rx_eq->cur_eqd < rx_min)
45771+ rx_eq->cur_eqd = rx_min;
45772+ } else {
45773+ if (rx_cur > BE_MAX_EQD)
45774+ rx_cur = BE_MAX_EQD;
45775+ if (rx_eq->cur_eqd != rx_cur) {
45776+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45777+ rx_cur);
45778+ if (!status)
45779+ rx_eq->cur_eqd = rx_cur;
45780+ }
45781+ }
45782 }
45783- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45784-
45785- rx_max = coalesce->rx_coalesce_usecs_high;
45786- rx_min = coalesce->rx_coalesce_usecs_low;
45787- rx_cur = coalesce->rx_coalesce_usecs;
45788
45789 tx_max = coalesce->tx_coalesce_usecs_high;
45790 tx_min = coalesce->tx_coalesce_usecs_low;
45791@@ -181,27 +292,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45792 tx_eq->cur_eqd = tx_cur;
45793 }
45794
45795- if (rx_eq->enable_aic) {
45796- if (rx_max > BE_MAX_EQD)
45797- rx_max = BE_MAX_EQD;
45798- if (rx_min > rx_max)
45799- rx_min = rx_max;
45800- rx_eq->max_eqd = rx_max;
45801- rx_eq->min_eqd = rx_min;
45802- if (rx_eq->cur_eqd > rx_max)
45803- rx_eq->cur_eqd = rx_max;
45804- if (rx_eq->cur_eqd < rx_min)
45805- rx_eq->cur_eqd = rx_min;
45806- } else {
45807- if (rx_cur > BE_MAX_EQD)
45808- rx_cur = BE_MAX_EQD;
45809- if (rx_eq->cur_eqd != rx_cur) {
45810- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45811- rx_cur);
45812- if (!status)
45813- rx_eq->cur_eqd = rx_cur;
45814- }
45815- }
45816 return 0;
45817 }
45818
45819@@ -229,81 +319,294 @@ be_get_ethtool_stats(struct net_device *netdev,
45820 struct ethtool_stats *stats, uint64_t *data)
45821 {
45822 struct be_adapter *adapter = netdev_priv(netdev);
45823- struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
45824- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
45825- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
45826- struct be_port_rxf_stats *port_stats =
45827- &rxf_stats->port[adapter->port_num];
45828- struct net_device_stats *net_stats = &adapter->stats.net_stats;
45829- struct be_erx_stats *erx_stats = &hw_stats->erx;
45830+ struct be_rx_obj *rxo;
45831+ struct be_tx_obj *txo;
45832 void *p = NULL;
45833- int i;
45834+ int i, j, base;
45835
45836 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45837 switch (et_stats[i].type) {
45838 case NETSTAT:
45839- p = net_stats;
45840+ p = &adapter->net_stats;
45841 break;
45842 case DRVSTAT:
45843- p = drvr_stats;
45844- break;
45845- case PORTSTAT:
45846- p = port_stats;
45847- break;
45848- case MISCSTAT:
45849- p = rxf_stats;
45850- break;
45851- case ERXSTAT: /* Currently only one ERX stat is provided */
45852- p = (u32 *)erx_stats + adapter->rx_obj.q.id;
45853+ p = &adapter->drv_stats;
45854 break;
45855 }
45856
45857 p = (u8 *)p + et_stats[i].offset;
45858 data[i] = (et_stats[i].size == sizeof(u64)) ?
45859- *(u64 *)p: *(u32 *)p;
45860+ *(u64 *)p:(*(u32 *)p);
45861 }
45862
45863- return;
45864+ base = ETHTOOL_STATS_NUM;
45865+ for_all_rx_queues(adapter, rxo, j) {
45866+ for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
45867+ p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
45868+ data[base + j * ETHTOOL_RXSTATS_NUM + i] =
45869+ (et_rx_stats[i].size == sizeof(u64)) ?
45870+ *(u64 *)p: *(u32 *)p;
45871+ }
45872+ }
45873+
45874+ base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
45875+ for_all_tx_queues(adapter, txo, j) {
45876+ for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
45877+ p = (u8 *)&txo->stats + et_tx_stats[i].offset;
45878+ data[base + j * ETHTOOL_TXSTATS_NUM + i] =
45879+ (et_tx_stats[i].size == sizeof(u64)) ?
45880+ *(u64 *)p: *(u32 *)p;
45881+ }
45882+ }
45883 }
45884
45885 static void
45886 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
45887 uint8_t *data)
45888 {
45889- int i;
45890+ struct be_adapter *adapter = netdev_priv(netdev);
45891+ int i, j;
45892+
45893 switch (stringset) {
45894 case ETH_SS_STATS:
45895 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45896 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
45897 data += ETH_GSTRING_LEN;
45898 }
45899+ for (i = 0; i < adapter->num_rx_qs; i++) {
45900+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
45901+ sprintf(data, "rxq%d: %s", i,
45902+ et_rx_stats[j].desc);
45903+ data += ETH_GSTRING_LEN;
45904+ }
45905+ }
45906+ for (i = 0; i < adapter->num_tx_qs; i++) {
45907+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
45908+ sprintf(data, "txq%d: %s", i,
45909+ et_tx_stats[j].desc);
45910+ data += ETH_GSTRING_LEN;
45911+ }
45912+ }
45913+ break;
45914+ case ETH_SS_TEST:
45915+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
45916+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
45917+ data += ETH_GSTRING_LEN;
45918+ }
45919 break;
45920 }
45921 }
45922
45923+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45924 static int be_get_stats_count(struct net_device *netdev)
45925 {
45926- return ETHTOOL_STATS_NUM;
45927+ struct be_adapter *adapter = netdev_priv(netdev);
45928+
45929+ return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM
45930+ + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45931 }
45932+static int
45933+be_self_test_count(struct net_device *dev)
45934+{
45935+ return ETHTOOL_TESTS_NUM;
45936+}
45937+#else
45938+
45939+static int be_get_sset_count(struct net_device *netdev, int stringset)
45940+{
45941+ struct be_adapter *adapter = netdev_priv(netdev);
45942+
45943+ switch (stringset) {
45944+ case ETH_SS_TEST:
45945+ return ETHTOOL_TESTS_NUM;
45946+ case ETH_SS_STATS:
45947+ return ETHTOOL_STATS_NUM +
45948+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
45949+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45950+ default:
45951+ return -EINVAL;
45952+ }
45953+}
45954+#endif
45955
45956 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
45957 {
45958- ecmd->speed = SPEED_10000;
45959+ struct be_adapter *adapter = netdev_priv(netdev);
45960+ struct be_phy_info phy_info;
45961+ u8 mac_speed = 0;
45962+ u16 link_speed = 0;
45963+ int link_status = LINK_DOWN;
45964+ int status;
45965+
45966+ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
45967+ status = be_cmd_link_status_query(adapter, &link_status,
45968+ &mac_speed, &link_speed, 0);
45969+
45970+ be_link_status_update(adapter, link_status);
45971+ /* link_speed is in units of 10 Mbps */
45972+ if (link_speed) {
45973+ ecmd->speed = link_speed*10;
45974+ } else {
45975+ switch (mac_speed) {
45976+ case PHY_LINK_SPEED_10MBPS:
45977+ ecmd->speed = SPEED_10;
45978+ break;
45979+ case PHY_LINK_SPEED_100MBPS:
45980+ ecmd->speed = SPEED_100;
45981+ break;
45982+ case PHY_LINK_SPEED_1GBPS:
45983+ ecmd->speed = SPEED_1000;
45984+ break;
45985+ case PHY_LINK_SPEED_10GBPS:
45986+ ecmd->speed = SPEED_10000;
45987+ break;
45988+ case PHY_LINK_SPEED_ZERO:
45989+ ecmd->speed = 0;
45990+ break;
45991+ }
45992+ }
45993+
45994+ status = be_cmd_get_phy_info(adapter, &phy_info);
45995+ if (!status) {
45996+ switch (phy_info.interface_type) {
45997+ case PHY_TYPE_XFP_10GB:
45998+ case PHY_TYPE_SFP_1GB:
45999+ case PHY_TYPE_SFP_PLUS_10GB:
46000+ ecmd->port = PORT_FIBRE;
46001+ break;
46002+ default:
46003+ ecmd->port = PORT_TP;
46004+ break;
46005+ }
46006+
46007+ switch (phy_info.interface_type) {
46008+ case PHY_TYPE_KR_10GB:
46009+ case PHY_TYPE_KX4_10GB:
46010+ ecmd->transceiver = XCVR_INTERNAL;
46011+ break;
46012+ default:
46013+ ecmd->transceiver = XCVR_EXTERNAL;
46014+ break;
46015+ }
46016+
46017+ if (phy_info.auto_speeds_supported) {
46018+ ecmd->supported |= SUPPORTED_Autoneg;
46019+ ecmd->autoneg = AUTONEG_ENABLE;
46020+ ecmd->advertising |= ADVERTISED_Autoneg;
46021+ }
46022+
46023+ if (phy_info.misc_params & BE_PAUSE_SYM_EN) {
46024+ ecmd->supported |= SUPPORTED_Pause;
46025+ ecmd->advertising |= ADVERTISED_Pause;
46026+ }
46027+
46028+ }
46029+
46030+ /* Save for future use */
46031+ adapter->link_speed = ecmd->speed;
46032+ adapter->port_type = ecmd->port;
46033+ adapter->transceiver = ecmd->transceiver;
46034+ adapter->autoneg = ecmd->autoneg;
46035+ } else {
46036+ ecmd->speed = adapter->link_speed;
46037+ ecmd->port = adapter->port_type;
46038+ ecmd->transceiver = adapter->transceiver;
46039+ ecmd->autoneg = adapter->autoneg;
46040+ }
46041+
46042 ecmd->duplex = DUPLEX_FULL;
46043- ecmd->autoneg = AUTONEG_DISABLE;
46044+ ecmd->phy_address = (adapter->hba_port_num << 4) |
46045+ (adapter->port_name[adapter->hba_port_num]);
46046+ switch (ecmd->port) {
46047+ case PORT_FIBRE:
46048+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
46049+ break;
46050+ case PORT_TP:
46051+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
46052+ break;
46053+ }
46054+
46055+ if (ecmd->autoneg) {
46056+ ecmd->supported |= SUPPORTED_1000baseT_Full;
46057+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
46058+ ADVERTISED_1000baseT_Full);
46059+ }
46060+
46061 return 0;
46062 }
46063
46064+static int be_set_settings(struct net_device *netdev,
46065+ struct ethtool_cmd *ecmd)
46066+{
46067+ struct be_adapter *adapter = netdev_priv(netdev);
46068+ struct be_phy_info phy_info;
46069+ u16 mac_speed=0;
46070+ u16 dac_cable_len=0;
46071+ u16 port_speed = 0;
46072+ int status;
46073+
46074+ status = be_cmd_get_phy_info(adapter, &phy_info);
46075+ if (status) {
46076+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46077+ return status;
46078+ }
46079+
46080+ if (ecmd->autoneg == AUTONEG_ENABLE) {
46081+ switch(phy_info.interface_type) {
46082+ case PHY_TYPE_SFP_1GB:
46083+ case PHY_TYPE_BASET_1GB:
46084+ case PHY_TYPE_BASEX_1GB:
46085+ case PHY_TYPE_SGMII:
46086+ mac_speed = SPEED_AUTONEG_1GB_100MB_10MB;
46087+ break;
46088+ case PHY_TYPE_SFP_PLUS_10GB:
46089+ dev_warn(&adapter->pdev->dev,
46090+ "Autoneg not supported on this module. \n");
46091+ return -EINVAL;
46092+ case PHY_TYPE_KR_10GB:
46093+ case PHY_TYPE_KX4_10GB:
46094+ mac_speed = SPEED_AUTONEG_10GB_1GB;
46095+ break;
46096+ case PHY_TYPE_BASET_10GB:
46097+ mac_speed = SPEED_AUTONEG_10GB_1GB_100MB;
46098+ break;
46099+ }
46100+ } else if(ecmd->autoneg == AUTONEG_DISABLE) {
46101+ if(ecmd->speed == SPEED_10) {
46102+ mac_speed = SPEED_FORCED_10MB;
46103+ } else if(ecmd->speed == SPEED_100) {
46104+ mac_speed = SPEED_FORCED_100MB;
46105+ } else if(ecmd->speed == SPEED_1000) {
46106+ mac_speed = SPEED_FORCED_1GB;
46107+ } else if(ecmd->speed == SPEED_10000) {
46108+ mac_speed = SPEED_FORCED_10GB;
46109+ }
46110+ }
46111+
46112+ status = be_cmd_get_port_speed(adapter, adapter->hba_port_num,
46113+ &dac_cable_len, &port_speed);
46114+
46115+ if (!status && port_speed != mac_speed)
46116+ status = be_cmd_set_port_speed_v1(adapter,
46117+ adapter->hba_port_num, mac_speed,
46118+ dac_cable_len);
46119+ if (status)
46120+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46121+
46122+ return status;
46123+
46124+}
46125+
46126 static void
46127 be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
46128 {
46129 struct be_adapter *adapter = netdev_priv(netdev);
46130
46131- ring->rx_max_pending = adapter->rx_obj.q.len;
46132- ring->tx_max_pending = adapter->tx_obj.q.len;
46133+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
46134+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
46135
46136- ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
46137- ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
46138+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
46139+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
46140 }
46141
46142 static void
46143@@ -312,7 +615,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46144 struct be_adapter *adapter = netdev_priv(netdev);
46145
46146 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
46147- ecmd->autoneg = 0;
46148+ ecmd->autoneg = adapter->autoneg;
46149 }
46150
46151 static int
46152@@ -334,6 +637,203 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46153 return status;
46154 }
46155
46156+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46157+static int
46158+be_phys_id(struct net_device *netdev, u32 data)
46159+{
46160+ struct be_adapter *adapter = netdev_priv(netdev);
46161+ int status;
46162+ u32 cur;
46163+
46164+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
46165+
46166+ if (cur == BEACON_STATE_ENABLED)
46167+ return 0;
46168+
46169+ if (data < 2)
46170+ data = 2;
46171+
46172+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46173+ BEACON_STATE_ENABLED);
46174+ set_current_state(TASK_INTERRUPTIBLE);
46175+ schedule_timeout(data*HZ);
46176+
46177+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46178+ BEACON_STATE_DISABLED);
46179+
46180+ return status;
46181+}
46182+#else
46183+static int
46184+be_set_phys_id(struct net_device *netdev,
46185+ enum ethtool_phys_id_state state)
46186+{
46187+ struct be_adapter *adapter = netdev_priv(netdev);
46188+
46189+ switch (state) {
46190+ case ETHTOOL_ID_ACTIVE:
46191+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
46192+ &adapter->beacon_state);
46193+ return 1; /* cycle on/off once per second */
46194+
46195+ case ETHTOOL_ID_ON:
46196+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46197+ BEACON_STATE_ENABLED);
46198+ break;
46199+
46200+ case ETHTOOL_ID_OFF:
46201+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46202+ BEACON_STATE_DISABLED);
46203+ break;
46204+
46205+ case ETHTOOL_ID_INACTIVE:
46206+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46207+ adapter->beacon_state);
46208+ }
46209+
46210+ return 0;
46211+}
46212+#endif
46213+
46214+static bool
46215+be_is_wol_supported(struct be_adapter *adapter)
46216+{
46217+ struct pci_dev *pdev = adapter->pdev;
46218+
46219+ if (!be_physfn(adapter))
46220+ return false;
46221+
46222+ switch (pdev->subsystem_device) {
46223+ case OC_SUBSYS_DEVICE_ID1:
46224+ case OC_SUBSYS_DEVICE_ID2:
46225+ case OC_SUBSYS_DEVICE_ID3:
46226+ case OC_SUBSYS_DEVICE_ID4:
46227+ return false;
46228+ default:
46229+ return true;
46230+ }
46231+}
46232+
46233+static void
46234+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46235+{
46236+ struct be_adapter *adapter = netdev_priv(netdev);
46237+
46238+ if (be_is_wol_supported(adapter))
46239+ wol->supported = WAKE_MAGIC;
46240+ if (adapter->wol)
46241+ wol->wolopts = WAKE_MAGIC;
46242+ else
46243+ wol->wolopts = 0;
46244+ memset(&wol->sopass, 0, sizeof(wol->sopass));
46245+}
46246+
46247+static int
46248+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46249+{
46250+ struct be_adapter *adapter = netdev_priv(netdev);
46251+
46252+ if (wol->wolopts & ~WAKE_MAGIC)
46253+ return -EOPNOTSUPP;
46254+
46255+ if (!be_is_wol_supported(adapter)) {
46256+ dev_warn(&adapter->pdev->dev,
46257+ "WOL not supported for this subsystemid: %x\n",
46258+ adapter->pdev->subsystem_device);
46259+ return -EOPNOTSUPP;
46260+ }
46261+
46262+ if (wol->wolopts & WAKE_MAGIC)
46263+ adapter->wol = true;
46264+ else
46265+ adapter->wol = false;
46266+
46267+ return 0;
46268+}
46269+
46270+static int
46271+be_test_ddr_dma(struct be_adapter *adapter)
46272+{
46273+ int ret, i;
46274+ struct be_dma_mem ddrdma_cmd;
46275+ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
46276+
46277+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
46278+ ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
46279+ &ddrdma_cmd.dma);
46280+ if (!ddrdma_cmd.va) {
46281+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
46282+ return -ENOMEM;
46283+ }
46284+
46285+ for (i = 0; i < 2; i++) {
46286+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
46287+ 4096, &ddrdma_cmd);
46288+ if (ret != 0)
46289+ goto err;
46290+ }
46291+
46292+err:
46293+ pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
46294+ ddrdma_cmd.va, ddrdma_cmd.dma);
46295+ return ret;
46296+}
46297+
46298+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
46299+ u64 *status)
46300+{
46301+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
46302+ loopback_type, 1);
46303+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
46304+ loopback_type, 1500,
46305+ 2, 0xabc);
46306+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
46307+ BE_NO_LOOPBACK, 1);
46308+ return *status;
46309+}
46310+
46311+static void
46312+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
46313+{
46314+ struct be_adapter *adapter = netdev_priv(netdev);
46315+ int link_status;
46316+ u8 mac_speed = 0;
46317+ u16 qos_link_speed = 0;
46318+
46319+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
46320+
46321+ if (test->flags & ETH_TEST_FL_OFFLINE) {
46322+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
46323+ &data[0]) != 0) {
46324+ test->flags |= ETH_TEST_FL_FAILED;
46325+ }
46326+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
46327+ &data[1]) != 0) {
46328+ test->flags |= ETH_TEST_FL_FAILED;
46329+ }
46330+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
46331+ &data[2]) != 0) {
46332+ test->flags |= ETH_TEST_FL_FAILED;
46333+ }
46334+ }
46335+
46336+ if (be_test_ddr_dma(adapter) != 0) {
46337+ data[3] = 1;
46338+ test->flags |= ETH_TEST_FL_FAILED;
46339+ }
46340+
46341+ if (be_cmd_link_status_query(adapter, &link_status, &mac_speed,
46342+ &qos_link_speed, 0) != 0) {
46343+ test->flags |= ETH_TEST_FL_FAILED;
46344+ data[4] = -1;
46345+ } else if (!mac_speed) {
46346+ test->flags |= ETH_TEST_FL_FAILED;
46347+ data[4] = 1;
46348+ }
46349+
46350+}
46351+
46352+#ifdef HAVE_ETHTOOL_FLASH
46353 static int
46354 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46355 {
46356@@ -347,11 +847,73 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46357
46358 return be_load_fw(adapter, file_name);
46359 }
46360+#endif
46361
46362-const struct ethtool_ops be_ethtool_ops = {
46363+static int
46364+be_get_eeprom_len(struct net_device *netdev)
46365+{
46366+ return BE_READ_SEEPROM_LEN;
46367+}
46368+
46369+static int
46370+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
46371+ uint8_t *data)
46372+{
46373+ struct be_adapter *adapter = netdev_priv(netdev);
46374+ struct be_dma_mem eeprom_cmd;
46375+ struct be_cmd_resp_seeprom_read *resp;
46376+ int status;
46377+
46378+ if (!eeprom->len)
46379+ return -EINVAL;
46380+
46381+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
46382+
46383+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
46384+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
46385+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
46386+ &eeprom_cmd.dma);
46387+
46388+ if (!eeprom_cmd.va) {
46389+ dev_err(&adapter->pdev->dev,
46390+ "Memory allocation failure. Could not read eeprom\n");
46391+ return -ENOMEM;
46392+ }
46393+
46394+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
46395+
46396+ if (!status) {
46397+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
46398+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
46399+ }
46400+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
46401+ eeprom_cmd.dma);
46402+
46403+ return status;
46404+}
46405+
46406+static int be_set_tso(struct net_device *netdev, uint32_t data)
46407+{
46408+ if (data) {
46409+ netdev->features |= NETIF_F_TSO;
46410+ netdev->features |= NETIF_F_TSO6;
46411+ } else {
46412+ netdev->features &= ~NETIF_F_TSO;
46413+ netdev->features &= ~NETIF_F_TSO6;
46414+ }
46415+ return 0;
46416+}
46417+
46418+
46419+struct ethtool_ops be_ethtool_ops = {
46420 .get_settings = be_get_settings,
46421+ .set_settings = be_set_settings,
46422 .get_drvinfo = be_get_drvinfo,
46423+ .get_wol = be_get_wol,
46424+ .set_wol = be_set_wol,
46425 .get_link = ethtool_op_get_link,
46426+ .get_eeprom_len = be_get_eeprom_len,
46427+ .get_eeprom = be_read_eeprom,
46428 .get_coalesce = be_get_coalesce,
46429 .set_coalesce = be_set_coalesce,
46430 .get_ringparam = be_get_ringparam,
46431@@ -364,9 +926,21 @@ const struct ethtool_ops be_ethtool_ops = {
46432 .get_sg = ethtool_op_get_sg,
46433 .set_sg = ethtool_op_set_sg,
46434 .get_tso = ethtool_op_get_tso,
46435- .set_tso = ethtool_op_set_tso,
46436+ .set_tso = be_set_tso,
46437 .get_strings = be_get_stat_strings,
46438+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46439+ .phys_id = be_phys_id,
46440 .get_stats_count = be_get_stats_count,
46441+ .self_test_count = be_self_test_count,
46442+#else
46443+ .set_phys_id = be_set_phys_id,
46444+ .get_sset_count = be_get_sset_count,
46445+#endif
46446 .get_ethtool_stats = be_get_ethtool_stats,
46447+ .get_regs_len = be_get_reg_len,
46448+ .get_regs = be_get_regs,
46449+#ifdef HAVE_ETHTOOL_FLASH
46450 .flash_device = be_do_flash,
46451+#endif
46452+ .self_test = be_self_test
46453 };
46454diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
46455index a3394b4..f871d8c 100644
46456--- a/drivers/net/benet/be_hw.h
46457+++ b/drivers/net/benet/be_hw.h
46458@@ -1,18 +1,18 @@
46459 /*
46460- * Copyright (C) 2005 - 2009 ServerEngines
46461+ * Copyright (C) 2005 - 2011 Emulex
46462 * All rights reserved.
46463 *
46464 * This program is free software; you can redistribute it and/or
46465 * modify it under the terms of the GNU General Public License version 2
46466- * as published by the Free Software Foundation. The full GNU General
46467+ * as published by the Free Software Foundation. The full GNU General
46468 * Public License is included in this distribution in the file called COPYING.
46469 *
46470 * Contact Information:
46471- * linux-drivers@serverengines.com
46472+ * linux-drivers@emulex.com
46473 *
46474- * ServerEngines
46475- * 209 N. Fair Oaks Ave
46476- * Sunnyvale, CA 94085
46477+ * Emulex
46478+ * 3333 Susan Street
46479+ * Costa Mesa, CA 92626
46480 */
46481
46482 /********* Mailbox door bell *************/
46483@@ -26,24 +26,34 @@
46484 * queue entry.
46485 */
46486 #define MPU_MAILBOX_DB_OFFSET 0x160
46487-#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46488+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46489 #define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
46490
46491-#define MPU_EP_CONTROL 0
46492+#define MPU_EP_CONTROL 0
46493
46494 /********** MPU semphore ******************/
46495-#define MPU_EP_SEMAPHORE_OFFSET 0xac
46496+#define MPU_EP_SEMAPHORE_OFFSET 0xac
46497+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
46498 #define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
46499 #define EP_SEMAPHORE_POST_ERR_MASK 0x1
46500 #define EP_SEMAPHORE_POST_ERR_SHIFT 31
46501 /* MPU semphore POST stage values */
46502-#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46503-#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46504+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46505+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46506 #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
46507 #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46508
46509+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
46510+#define SLIPORT_STATUS_OFFSET 0x404
46511+#define SLIPORT_CONTROL_OFFSET 0x408
46512+
46513+#define SLIPORT_STATUS_ERR_MASK 0x80000000
46514+#define SLIPORT_STATUS_RN_MASK 0x01000000
46515+#define SLIPORT_STATUS_RDY_MASK 0x00800000
46516+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
46517+
46518 /********* Memory BAR register ************/
46519-#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46520+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46521 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
46522 * Disable" may still globally block interrupts in addition to individual
46523 * interrupt masks; a mechanism for the device driver to block all interrupts
46524@@ -52,13 +62,70 @@
46525 */
46526 #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
46527
46528+/********* Link Status CSR ****************/
46529+#define PCICFG_PCIE_LINK_STATUS_OFFSET 0xd0
46530+#define PCIE_LINK_STATUS_SPEED_MASK 0xFF /* bits 16 - 19 */
46531+#define PCIE_LINK_STATUS_SPEED_SHIFT 16
46532+#define PCIE_LINK_STATUS_NEG_WIDTH_MASK 0x3F /* bits 20 - 25 */
46533+#define PCIE_LINK_STATUS_NEG_WIDTH_SHIFT 20
46534+
46535+/********* Link Capability CSR ************/
46536+#define PCICFG_PCIE_LINK_CAP_OFFSET 0xcc
46537+#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xFF /* bits 0 - 3 */
46538+#define PCIE_LINK_CAP_MAX_SPEED_SHIFT 0
46539+#define PCIE_LINK_CAP_MAX_WIDTH_MASK 0x3F /* bits 4 - 9 */
46540+#define PCIE_LINK_CAP_MAX_WIDTH_SHIFT 4
46541+
46542+/********* PCI Function Capability ************/
46543+#define BE_FUNCTION_CAPS_UNCLASSIFIED_STATS 0x1
46544+#define BE_FUNCTION_CAPS_RSS 0x2
46545+#define BE_FUNCTION_CAPS_PROMISCUOUS 0x4
46546+#define BE_FUNCTION_CAPS_LEGACY_MODE 0x8
46547+
46548+/********* Power managment (WOL) **********/
46549+#define PCICFG_PM_CONTROL_OFFSET 0x44
46550+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
46551+
46552+/********* Online Control Registers *******/
46553+#define PCICFG_ONLINE0 0xB0
46554+#define PCICFG_ONLINE1 0xB4
46555+
46556+/********* UE Status and Mask Registers ***/
46557+#define PCICFG_UE_STATUS_LOW 0xA0
46558+#define PCICFG_UE_STATUS_HIGH 0xA4
46559+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
46560+#define PCICFG_UE_STATUS_HI_MASK 0xAC
46561+
46562+/******** SLI_INTF ***********************/
46563+#define SLI_INTF_REG_OFFSET 0x58
46564+#define SLI_INTF_VALID_MASK 0xE0000000
46565+#define SLI_INTF_VALID 0xC0000000
46566+#define SLI_INTF_HINT2_MASK 0x1F000000
46567+#define SLI_INTF_HINT2_SHIFT 24
46568+#define SLI_INTF_HINT1_MASK 0x00FF0000
46569+#define SLI_INTF_HINT1_SHIFT 16
46570+#define SLI_INTF_FAMILY_MASK 0x00000F00
46571+#define SLI_INTF_FAMILY_SHIFT 8
46572+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
46573+#define SLI_INTF_IF_TYPE_SHIFT 12
46574+#define SLI_INTF_REV_MASK 0x000000F0
46575+#define SLI_INTF_REV_SHIFT 4
46576+#define SLI_INTF_FT_MASK 0x00000001
46577+
46578+/* SLI family */
46579+#define BE_SLI_FAMILY 0x0
46580+#define LANCER_A0_SLI_FAMILY 0xA
46581+
46582 /********* ISR0 Register offset **********/
46583-#define CEV_ISR0_OFFSET 0xC18
46584+#define CEV_ISR0_OFFSET 0xC18
46585 #define CEV_ISR_SIZE 4
46586
46587 /********* Event Q door bell *************/
46588 #define DB_EQ_OFFSET DB_CQ_OFFSET
46589 #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
46590+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
46591+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
46592+
46593 /* Clear the interrupt for this eq */
46594 #define DB_EQ_CLR_SHIFT (9) /* bit 9 */
46595 /* Must be 1 */
46596@@ -69,12 +136,16 @@
46597 #define DB_EQ_REARM_SHIFT (29) /* bit 29 */
46598
46599 /********* Compl Q door bell *************/
46600-#define DB_CQ_OFFSET 0x120
46601+#define DB_CQ_OFFSET 0x120
46602 #define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46603+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
46604+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
46605+ placing at 11-15 */
46606+
46607 /* Number of event entries processed */
46608-#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46609+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46610 /* Rearm bit */
46611-#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46612+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46613
46614 /********** TX ULP door bell *************/
46615 #define DB_TXULP1_OFFSET 0x60
46616@@ -84,25 +155,103 @@
46617 #define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
46618
46619 /********** RQ(erx) door bell ************/
46620-#define DB_RQ_OFFSET 0x100
46621+#define DB_RQ_OFFSET 0x100
46622 #define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46623 /* Number of rx frags posted */
46624 #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
46625
46626 /********** MCC door bell ************/
46627-#define DB_MCCQ_OFFSET 0x140
46628+#define DB_MCCQ_OFFSET 0x140
46629 #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
46630 /* Number of entries posted */
46631 #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
46632
46633+/********** SRIOV VF PCICFG OFFSET ********/
46634+#define SRIOV_VF_PCICFG_OFFSET (4096)
46635+
46636+/********** FAT TABLE ********/
46637+#define RETRIEVE_FAT 0
46638+#define QUERY_FAT 1
46639+
46640+/* Flashrom related descriptors */
46641+#define IMAGE_TYPE_FIRMWARE 160
46642+#define IMAGE_TYPE_BOOTCODE 224
46643+#define IMAGE_TYPE_OPTIONROM 32
46644+
46645+#define NUM_FLASHDIR_ENTRIES 32
46646+
46647+#define IMG_TYPE_ISCSI_ACTIVE 0
46648+#define IMG_TYPE_REDBOOT 1
46649+#define IMG_TYPE_BIOS 2
46650+#define IMG_TYPE_PXE_BIOS 3
46651+#define IMG_TYPE_FCOE_BIOS 8
46652+#define IMG_TYPE_ISCSI_BACKUP 9
46653+#define IMG_TYPE_FCOE_FW_ACTIVE 10
46654+#define IMG_TYPE_FCOE_FW_BACKUP 11
46655+#define IMG_TYPE_NCSI_FW 13
46656+#define IMG_TYPE_PHY_FW 99
46657+#define TN_8022 13
46658+
46659+#define ILLEGAL_IOCTL_REQ 2
46660+#define FLASHROM_OPER_PHY_FLASH 9
46661+#define FLASHROM_OPER_PHY_SAVE 10
46662+#define FLASHROM_OPER_FLASH 1
46663+#define FLASHROM_OPER_SAVE 2
46664+#define FLASHROM_OPER_REPORT 4
46665+
46666+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
46667+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
46668+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
46669+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
46670+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
46671+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
46672+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
46673+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 (262144)
46674+
46675+#define FLASH_NCSI_MAGIC (0x16032009)
46676+#define FLASH_NCSI_DISABLED (0)
46677+#define FLASH_NCSI_ENABLED (1)
46678+
46679+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
46680+
46681+/* Offsets for components on Flash. */
46682+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
46683+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
46684+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
46685+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
46686+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
46687+#define FLASH_PXE_BIOS_START_g2 (7864320)
46688+#define FLASH_FCoE_BIOS_START_g2 (524288)
46689+#define FLASH_REDBOOT_START_g2 (0)
46690+
46691+#define FLASH_NCSI_START_g3 (15990784)
46692+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
46693+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
46694+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
46695+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
46696+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
46697+#define FLASH_PXE_BIOS_START_g3 (13107200)
46698+#define FLASH_FCoE_BIOS_START_g3 (13631488)
46699+#define FLASH_REDBOOT_START_g3 (262144)
46700+#define FLASH_PHY_FW_START_g3 (1310720)
46701+
46702+/************* Rx Packet Type Encoding **************/
46703+#define BE_UNICAST_PACKET 0
46704+#define BE_MULTICAST_PACKET 1
46705+#define BE_BROADCAST_PACKET 2
46706+#define BE_RSVD_PACKET 3
46707+
46708 /*
46709 * BE descriptors: host memory data structures whose formats
46710 * are hardwired in BE silicon.
46711 */
46712 /* Event Queue Descriptor */
46713-#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46714-#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46715-#define EQ_ENTRY_RES_ID_SHIFT 16
46716+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46717+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46718+#define EQ_ENTRY_RES_ID_SHIFT 16
46719+
46720+#define BE_MAC_PROMISCUOUS 62 /* Promiscuous mode */
46721+
46722 struct be_eq_entry {
46723 u32 evt;
46724 };
46725@@ -126,7 +275,7 @@ struct amap_eth_hdr_wrb {
46726 u8 event;
46727 u8 crc;
46728 u8 forward;
46729- u8 ipsec;
46730+ u8 lso6;
46731 u8 mgmt;
46732 u8 ipcs;
46733 u8 udpcs;
46734@@ -151,7 +300,7 @@ struct be_eth_hdr_wrb {
46735 * offset/shift/mask of each field */
46736 struct amap_eth_tx_compl {
46737 u8 wrb_index[16]; /* dword 0 */
46738- u8 ct[2]; /* dword 0 */
46739+ u8 ct[2]; /* dword 0 */
46740 u8 port[2]; /* dword 0 */
46741 u8 rsvd0[8]; /* dword 0 */
46742 u8 status[4]; /* dword 0 */
46743@@ -179,10 +328,10 @@ struct be_eth_rx_d {
46744
46745 /* RX Compl Queue Descriptor */
46746
46747-/* Pseudo amap definition for eth_rx_compl in which each bit of the
46748- * actual structure is defined as a byte: used to calculate
46749+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
46750+ * each bit of the actual structure is defined as a byte: used to calculate
46751 * offset/shift/mask of each field */
46752-struct amap_eth_rx_compl {
46753+struct amap_eth_rx_compl_v0 {
46754 u8 vlan_tag[16]; /* dword 0 */
46755 u8 pktsize[14]; /* dword 0 */
46756 u8 port; /* dword 0 */
46757@@ -213,39 +362,91 @@ struct amap_eth_rx_compl {
46758 u8 rsshash[32]; /* dword 3 */
46759 } __packed;
46760
46761+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
46762+ * each bit of the actual structure is defined as a byte: used to calculate
46763+ * offset/shift/mask of each field */
46764+struct amap_eth_rx_compl_v1 {
46765+ u8 vlan_tag[16]; /* dword 0 */
46766+ u8 pktsize[14]; /* dword 0 */
46767+ u8 vtp; /* dword 0 */
46768+ u8 ip_opt; /* dword 0 */
46769+ u8 err; /* dword 1 */
46770+ u8 rsshp; /* dword 1 */
46771+ u8 ipf; /* dword 1 */
46772+ u8 tcpf; /* dword 1 */
46773+ u8 udpf; /* dword 1 */
46774+ u8 ipcksm; /* dword 1 */
46775+ u8 l4_cksm; /* dword 1 */
46776+ u8 ip_version; /* dword 1 */
46777+ u8 macdst[7]; /* dword 1 */
46778+ u8 rsvd0; /* dword 1 */
46779+ u8 fragndx[10]; /* dword 1 */
46780+ u8 ct[2]; /* dword 1 */
46781+ u8 sw; /* dword 1 */
46782+ u8 numfrags[3]; /* dword 1 */
46783+ u8 rss_flush; /* dword 2 */
46784+ u8 cast_enc[2]; /* dword 2 */
46785+ u8 vtm; /* dword 2 */
46786+ u8 rss_bank; /* dword 2 */
46787+ u8 port[2]; /* dword 2 */
46788+ u8 vntagp; /* dword 2 */
46789+ u8 header_len[8]; /* dword 2 */
46790+ u8 header_split[2]; /* dword 2 */
46791+ u8 rsvd1[13]; /* dword 2 */
46792+ u8 valid; /* dword 2 */
46793+ u8 rsshash[32]; /* dword 3 */
46794+} __packed;
46795+
46796 struct be_eth_rx_compl {
46797 u32 dw[4];
46798 };
46799
46800-/* Flashrom related descriptors */
46801-#define IMAGE_TYPE_FIRMWARE 160
46802-#define IMAGE_TYPE_BOOTCODE 224
46803-#define IMAGE_TYPE_OPTIONROM 32
46804+struct mgmt_hba_attribs {
46805+ u8 flashrom_version_string[32];
46806+ u8 manufacturer_name[32];
46807+ u32 supported_modes;
46808+ u32 rsvd0[3];
46809+ u8 ncsi_ver_string[12];
46810+ u32 default_extended_timeout;
46811+ u8 controller_model_number[32];
46812+ u8 controller_description[64];
46813+ u8 controller_serial_number[32];
46814+ u8 ip_version_string[32];
46815+ u8 firmware_version_string[32];
46816+ u8 bios_version_string[32];
46817+ u8 redboot_version_string[32];
46818+ u8 driver_version_string[32];
46819+ u8 fw_on_flash_version_string[32];
46820+ u32 functionalities_supported;
46821+ u16 max_cdblength;
46822+ u8 asic_revision;
46823+ u8 generational_guid[16];
46824+ u8 hba_port_count;
46825+ u16 default_link_down_timeout;
46826+ u8 iscsi_ver_min_max;
46827+ u8 multifunction_device;
46828+ u8 cache_valid;
46829+ u8 hba_status;
46830+ u8 max_domains_supported;
46831+ u8 phy_port;
46832+ u32 firmware_post_status;
46833+ u32 hba_mtu[8];
46834+ u32 rsvd1[4];
46835+};
46836
46837-#define NUM_FLASHDIR_ENTRIES 32
46838-
46839-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
46840-#define FLASHROM_TYPE_BIOS 2
46841-#define FLASHROM_TYPE_PXE_BIOS 3
46842-#define FLASHROM_TYPE_FCOE_BIOS 8
46843-#define FLASHROM_TYPE_ISCSI_BACKUP 9
46844-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
46845-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
46846-
46847-#define FLASHROM_OPER_FLASH 1
46848-#define FLASHROM_OPER_SAVE 2
46849-
46850-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
46851-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
46852-
46853-/* Offsets for components on Flash. */
46854-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
46855-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
46856-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
46857-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
46858-#define FLASH_iSCSI_BIOS_START (7340032)
46859-#define FLASH_PXE_BIOS_START (7864320)
46860-#define FLASH_FCoE_BIOS_START (524288)
46861+struct mgmt_controller_attrib {
46862+ struct mgmt_hba_attribs hba_attribs;
46863+ u16 pci_vendor_id;
46864+ u16 pci_device_id;
46865+ u16 pci_sub_vendor_id;
46866+ u16 pci_sub_system_id;
46867+ u8 pci_bus_number;
46868+ u8 pci_device_number;
46869+ u8 pci_function_number;
46870+ u8 interface_type;
46871+ u64 unique_identifier;
46872+ u32 rsvd0[5];
46873+};
46874
46875 struct controller_id {
46876 u32 vendor;
46877@@ -254,7 +455,20 @@ struct controller_id {
46878 u32 subdevice;
46879 };
46880
46881-struct flash_file_hdr {
46882+struct flash_comp {
46883+ unsigned long offset;
46884+ int optype;
46885+ int size;
46886+};
46887+
46888+struct image_hdr {
46889+ u32 imageid;
46890+ u32 imageoffset;
46891+ u32 imagelength;
46892+ u32 image_checksum;
46893+ u8 image_version[32];
46894+};
46895+struct flash_file_hdr_g2 {
46896 u8 sign[32];
46897 u32 cksum;
46898 u32 antidote;
46899@@ -266,6 +480,17 @@ struct flash_file_hdr {
46900 u8 build[24];
46901 };
46902
46903+struct flash_file_hdr_g3 {
46904+ u8 sign[52];
46905+ u8 ufi_version[4];
46906+ u32 file_len;
46907+ u32 cksum;
46908+ u32 antidote;
46909+ u32 num_imgs;
46910+ u8 build[24];
46911+ u8 rsvd[32];
46912+};
46913+
46914 struct flash_section_hdr {
46915 u32 format_rev;
46916 u32 cksum;
46917@@ -299,3 +524,19 @@ struct flash_section_info {
46918 struct flash_section_hdr fsec_hdr;
46919 struct flash_section_entry fsec_entry[32];
46920 };
46921+
46922+struct flash_ncsi_image_hdr {
46923+ u32 magic;
46924+ u8 hdr_len;
46925+ u8 type;
46926+ u16 hdr_ver;
46927+ u8 rsvd0[2];
46928+ u16 load_offset;
46929+ u32 len;
46930+ u32 flash_offset;
46931+ u8 ver[16];
46932+ u8 name[24];
46933+ u32 img_cksum;
46934+ u8 rsvd1[4];
46935+ u32 hdr_cksum;
46936+};
46937diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
46938index 000e377..f501aa3 100644
46939--- a/drivers/net/benet/be_main.c
46940+++ b/drivers/net/benet/be_main.c
46941@@ -1,18 +1,18 @@
46942 /*
46943- * Copyright (C) 2005 - 2009 ServerEngines
46944+ * Copyright (C) 2005 - 2011 Emulex
46945 * All rights reserved.
46946 *
46947 * This program is free software; you can redistribute it and/or
46948 * modify it under the terms of the GNU General Public License version 2
46949- * as published by the Free Software Foundation. The full GNU General
46950+ * as published by the Free Software Foundation. The full GNU General
46951 * Public License is included in this distribution in the file called COPYING.
46952 *
46953 * Contact Information:
46954- * linux-drivers@serverengines.com
46955+ * linux-drivers@emulex.com
46956 *
46957- * ServerEngines
46958- * 209 N. Fair Oaks Ave
46959- * Sunnyvale, CA 94085
46960+ * Emulex
46961+ * 3333 Susan Street
46962+ * Costa Mesa, CA 92626
46963 */
46964
46965 #include "be.h"
46966@@ -22,23 +22,119 @@
46967 MODULE_VERSION(DRV_VER);
46968 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46969 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
46970-MODULE_AUTHOR("ServerEngines Corporation");
46971+MODULE_AUTHOR("Emulex Corporation");
46972 MODULE_LICENSE("GPL");
46973+MODULE_INFO(supported, "external");
46974
46975-static unsigned int rx_frag_size = 2048;
46976-module_param(rx_frag_size, uint, S_IRUGO);
46977-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
46978+static ushort rx_frag_size = 2048;
46979+static unsigned int num_vfs;
46980+static unsigned int msix = 1;
46981+module_param(rx_frag_size, ushort, S_IRUGO);
46982+module_param(num_vfs, uint, S_IRUGO);
46983+module_param(msix, uint, S_IRUGO);
46984+MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer"
46985+ " - 2048 (default), 4096 or 8192");
46986+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
46987+MODULE_PARM_DESC(msix, "Enable and disable the MSI"
46988+ "x (By default MSIx is enabled)");
46989+static unsigned int gro = 1;
46990+module_param(gro, uint, S_IRUGO);
46991+MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default");
46992+
46993+static unsigned int multi_rxq = true;
46994+module_param(multi_rxq, uint, S_IRUGO);
46995+MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
46996
46997 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
46998 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
46999 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
47000 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
47001 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
47002- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
47003+ /*
47004+ * Lancer is not part of Palau 4.0
47005+ * { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47006+ */
47007 { 0 }
47008 };
47009 MODULE_DEVICE_TABLE(pci, be_dev_ids);
47010
47011+/* UE Status Low CSR */
47012+static char *ue_status_low_desc[] = {
47013+ "CEV",
47014+ "CTX",
47015+ "DBUF",
47016+ "ERX",
47017+ "Host",
47018+ "MPU",
47019+ "NDMA",
47020+ "PTC ",
47021+ "RDMA ",
47022+ "RXF ",
47023+ "RXIPS ",
47024+ "RXULP0 ",
47025+ "RXULP1 ",
47026+ "RXULP2 ",
47027+ "TIM ",
47028+ "TPOST ",
47029+ "TPRE ",
47030+ "TXIPS ",
47031+ "TXULP0 ",
47032+ "TXULP1 ",
47033+ "UC ",
47034+ "WDMA ",
47035+ "TXULP2 ",
47036+ "HOST1 ",
47037+ "P0_OB_LINK ",
47038+ "P1_OB_LINK ",
47039+ "HOST_GPIO ",
47040+ "MBOX ",
47041+ "AXGMAC0",
47042+ "AXGMAC1",
47043+ "JTAG",
47044+ "MPU_INTPEND"
47045+};
47046+
47047+/* UE Status High CSR */
47048+static char *ue_status_hi_desc[] = {
47049+ "LPCMEMHOST",
47050+ "MGMT_MAC",
47051+ "PCS0ONLINE",
47052+ "MPU_IRAM",
47053+ "PCS1ONLINE",
47054+ "PCTL0",
47055+ "PCTL1",
47056+ "PMEM",
47057+ "RR",
47058+ "TXPB",
47059+ "RXPP",
47060+ "XAUI",
47061+ "TXP",
47062+ "ARM",
47063+ "IPC",
47064+ "HOST2",
47065+ "HOST3",
47066+ "HOST4",
47067+ "HOST5",
47068+ "HOST6",
47069+ "HOST7",
47070+ "HOST8",
47071+ "HOST9",
47072+ "NETC",
47073+ "Unknown",
47074+ "Unknown",
47075+ "Unknown",
47076+ "Unknown",
47077+ "Unknown",
47078+ "Unknown",
47079+ "Unknown",
47080+ "Unknown"
47081+};
47082+
47083+static inline bool be_multi_rxq(struct be_adapter *adapter)
47084+{
47085+ return (adapter->num_rx_qs > 1);
47086+}
47087+
47088 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
47089 {
47090 struct be_dma_mem *mem = &q->dma_mem;
47091@@ -69,6 +165,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
47092 u32 reg = ioread32(addr);
47093 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47094
47095+ if (adapter->eeh_err)
47096+ return;
47097+
47098 if (!enabled && enable)
47099 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47100 else if (enabled && !enable)
47101@@ -84,6 +183,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47102 u32 val = 0;
47103 val |= qid & DB_RQ_RING_ID_MASK;
47104 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
47105+
47106+ wmb();
47107 iowrite32(val, adapter->db + DB_RQ_OFFSET);
47108 }
47109
47110@@ -92,6 +193,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47111 u32 val = 0;
47112 val |= qid & DB_TXULP_RING_ID_MASK;
47113 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
47114+
47115+ wmb();
47116 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
47117 }
47118
47119@@ -100,6 +203,12 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
47120 {
47121 u32 val = 0;
47122 val |= qid & DB_EQ_RING_ID_MASK;
47123+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
47124+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
47125+
47126+ if (adapter->eeh_err)
47127+ return;
47128+
47129 if (arm)
47130 val |= 1 << DB_EQ_REARM_SHIFT;
47131 if (clear_int)
47132@@ -113,6 +222,12 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
47133 {
47134 u32 val = 0;
47135 val |= qid & DB_CQ_RING_ID_MASK;
47136+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
47137+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
47138+
47139+ if (adapter->eeh_err)
47140+ return;
47141+
47142 if (arm)
47143 val |= 1 << DB_CQ_REARM_SHIFT;
47144 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
47145@@ -124,96 +239,250 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
47146 struct be_adapter *adapter = netdev_priv(netdev);
47147 struct sockaddr *addr = p;
47148 int status = 0;
47149+ u8 current_mac[ETH_ALEN];
47150+ u32 pmac_id = adapter->pmac_id;
47151
47152- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
47153+ if (!is_valid_ether_addr(addr->sa_data))
47154+ return -EADDRNOTAVAIL;
47155+
47156+ status = be_cmd_mac_addr_query(adapter, current_mac,
47157+ MAC_ADDRESS_TYPE_NETWORK, false,
47158+ adapter->if_handle);
47159 if (status)
47160- return status;
47161+ goto err;
47162+
47163+ if (!memcmp(addr->sa_data, current_mac, ETH_ALEN))
47164+ goto done;
47165
47166 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
47167- adapter->if_handle, &adapter->pmac_id);
47168- if (!status)
47169+ adapter->if_handle, &adapter->pmac_id, 0);
47170+
47171+ if (!status) {
47172+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
47173+ pmac_id, 0);
47174 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
47175+ goto done;
47176+ }
47177
47178- return status;
47179+err:
47180+ if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
47181+ return -EPERM;
47182+ else
47183+ dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n",
47184+ addr->sa_data);
47185+done:
47186+ return status;
47187+}
47188+
47189+static void populate_be2_stats(struct be_adapter *adapter)
47190+{
47191+
47192+ struct be_drv_stats *drvs = &adapter->drv_stats;
47193+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47194+ struct be_port_rxf_stats_v0 *port_stats =
47195+ be_port_rxf_stats_from_cmd(adapter);
47196+ struct be_rxf_stats_v0 *rxf_stats =
47197+ be_rxf_stats_from_cmd(adapter);
47198+
47199+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
47200+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
47201+ drvs->rx_control_frames = port_stats->rx_control_frames;
47202+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47203+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47204+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47205+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47206+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47207+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47208+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
47209+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47210+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47211+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47212+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47213+ drvs->rx_input_fifo_overflow_drop =
47214+ port_stats->rx_input_fifo_overflow;
47215+ drvs->rx_dropped_header_too_small =
47216+ port_stats->rx_dropped_header_too_small;
47217+ drvs->rx_address_match_errors =
47218+ port_stats->rx_address_match_errors;
47219+ drvs->rx_alignment_symbol_errors =
47220+ port_stats->rx_alignment_symbol_errors;
47221+
47222+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
47223+ drvs->tx_controlframes = port_stats->tx_controlframes;
47224+
47225+ if (adapter->port_num)
47226+ drvs->jabber_events =
47227+ rxf_stats->port1_jabber_events;
47228+ else
47229+ drvs->jabber_events =
47230+ rxf_stats->port0_jabber_events;
47231+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47232+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47233+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47234+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47235+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
47236+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47237+ drvs->rx_drops_no_tpre_descr =
47238+ rxf_stats->rx_drops_no_tpre_descr;
47239+ drvs->rx_drops_too_many_frags =
47240+ rxf_stats->rx_drops_too_many_frags;
47241+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47242+}
47243+
47244+static void populate_be3_stats(struct be_adapter *adapter)
47245+{
47246+ struct be_drv_stats *drvs = &adapter->drv_stats;
47247+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47248+
47249+ struct be_rxf_stats_v1 *rxf_stats =
47250+ be_rxf_stats_from_cmd(adapter);
47251+ struct be_port_rxf_stats_v1 *port_stats =
47252+ be_port_rxf_stats_from_cmd(adapter);
47253+
47254+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
47255+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
47256+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
47257+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
47258+ drvs->rx_control_frames = port_stats->rx_control_frames;
47259+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47260+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47261+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47262+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47263+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47264+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47265+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47266+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47267+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47268+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47269+ drvs->rx_dropped_header_too_small =
47270+ port_stats->rx_dropped_header_too_small;
47271+ drvs->rx_input_fifo_overflow_drop =
47272+ port_stats->rx_input_fifo_overflow_drop;
47273+ drvs->rx_address_match_errors =
47274+ port_stats->rx_address_match_errors;
47275+ drvs->rx_alignment_symbol_errors =
47276+ port_stats->rx_alignment_symbol_errors;
47277+ drvs->rxpp_fifo_overflow_drop =
47278+ port_stats->rxpp_fifo_overflow_drop;
47279+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
47280+ drvs->tx_controlframes = port_stats->tx_controlframes;
47281+ drvs->jabber_events = port_stats->jabber_events;
47282+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47283+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47284+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47285+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47286+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
47287+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47288+ drvs->rx_drops_no_tpre_descr =
47289+ rxf_stats->rx_drops_no_tpre_descr;
47290+ drvs->rx_drops_too_many_frags =
47291+ rxf_stats->rx_drops_too_many_frags;
47292+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47293+}
47294+
47295+
47296+static void accumulate_16bit_val(u32 *acc, u16 val)
47297+{
47298+#define lo(x) (x & 0xFFFF)
47299+#define hi(x) (x & 0xFFFF0000)
47300+ bool wrapped = val < lo(*acc);
47301+ u32 newacc = hi(*acc) + val;
47302+
47303+ if (wrapped)
47304+ newacc += 65536;
47305+ ACCESS_ONCE_RW(*acc) = newacc;
47306+}
47307+
47308+void be_parse_stats(struct be_adapter *adapter)
47309+{
47310+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
47311+ struct be_rx_obj *rxo;
47312+ int i;
47313+
47314+ if (adapter->generation == BE_GEN3) {
47315+ populate_be3_stats(adapter);
47316+ } else {
47317+ populate_be2_stats(adapter);
47318+ }
47319+
47320+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
47321+ for_all_rx_queues(adapter, rxo, i) {
47322+ /* below erx HW counter can actually wrap around after
47323+ * 65535. Driver accumulates a 32-bit value
47324+ */
47325+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
47326+ (u16)erx->rx_drops_no_fragments[rxo->q.id]);
47327+ }
47328 }
47329
47330 void netdev_stats_update(struct be_adapter *adapter)
47331 {
47332- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
47333- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
47334- struct be_port_rxf_stats *port_stats =
47335- &rxf_stats->port[adapter->port_num];
47336- struct net_device_stats *dev_stats = &adapter->stats.net_stats;
47337- struct be_erx_stats *erx_stats = &hw_stats->erx;
47338+ struct be_drv_stats *drvs = &adapter->drv_stats;
47339+ struct net_device_stats *dev_stats = &adapter->net_stats;
47340+ struct be_rx_obj *rxo;
47341+ struct be_tx_obj *txo;
47342+ unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
47343+ int i;
47344
47345- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
47346- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
47347- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
47348- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
47349+ for_all_rx_queues(adapter, rxo, i) {
47350+ pkts += rx_stats(rxo)->rx_pkts;
47351+ bytes += rx_stats(rxo)->rx_bytes;
47352+ mcast += rx_stats(rxo)->rx_mcast_pkts;
47353+ drops += rx_stats(rxo)->rx_drops_no_frags;
47354+ }
47355+ dev_stats->rx_packets = pkts;
47356+ dev_stats->rx_bytes = bytes;
47357+ dev_stats->multicast = mcast;
47358+ dev_stats->rx_dropped = drops;
47359+
47360+ pkts = bytes = 0;
47361+ for_all_tx_queues(adapter, txo, i) {
47362+ pkts += tx_stats(txo)->be_tx_pkts;
47363+ bytes += tx_stats(txo)->be_tx_bytes;
47364+ }
47365+ dev_stats->tx_packets = pkts;
47366+ dev_stats->tx_bytes = bytes;
47367
47368 /* bad pkts received */
47369- dev_stats->rx_errors = port_stats->rx_crc_errors +
47370- port_stats->rx_alignment_symbol_errors +
47371- port_stats->rx_in_range_errors +
47372- port_stats->rx_out_range_errors +
47373- port_stats->rx_frame_too_long +
47374- port_stats->rx_dropped_too_small +
47375- port_stats->rx_dropped_too_short +
47376- port_stats->rx_dropped_header_too_small +
47377- port_stats->rx_dropped_tcp_length +
47378- port_stats->rx_dropped_runt +
47379- port_stats->rx_tcp_checksum_errs +
47380- port_stats->rx_ip_checksum_errs +
47381- port_stats->rx_udp_checksum_errs;
47382-
47383- /* no space in linux buffers: best possible approximation */
47384- dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
47385+ dev_stats->rx_errors = drvs->rx_crc_errors +
47386+ drvs->rx_alignment_symbol_errors +
47387+ drvs->rx_in_range_errors +
47388+ drvs->rx_out_range_errors +
47389+ drvs->rx_frame_too_long +
47390+ drvs->rx_dropped_too_small +
47391+ drvs->rx_dropped_too_short +
47392+ drvs->rx_dropped_header_too_small +
47393+ drvs->rx_dropped_tcp_length +
47394+ drvs->rx_dropped_runt +
47395+ drvs->rx_tcp_checksum_errs +
47396+ drvs->rx_ip_checksum_errs +
47397+ drvs->rx_udp_checksum_errs;
47398
47399 /* detailed rx errors */
47400- dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
47401- port_stats->rx_out_range_errors +
47402- port_stats->rx_frame_too_long;
47403+ dev_stats->rx_length_errors = drvs->rx_in_range_errors +
47404+ drvs->rx_out_range_errors +
47405+ drvs->rx_frame_too_long;
47406
47407- /* receive ring buffer overflow */
47408- dev_stats->rx_over_errors = 0;
47409-
47410- dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
47411+ dev_stats->rx_crc_errors = drvs->rx_crc_errors;
47412
47413 /* frame alignment errors */
47414- dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
47415+ dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
47416
47417 /* receiver fifo overrun */
47418 /* drops_no_pbuf is no per i/f, it's per BE card */
47419- dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
47420- port_stats->rx_input_fifo_overflow +
47421- rxf_stats->rx_drops_no_pbuf;
47422- /* receiver missed packetd */
47423- dev_stats->rx_missed_errors = 0;
47424-
47425- /* packet transmit problems */
47426- dev_stats->tx_errors = 0;
47427-
47428- /* no space available in linux */
47429- dev_stats->tx_dropped = 0;
47430-
47431- dev_stats->multicast = port_stats->rx_multicast_frames;
47432- dev_stats->collisions = 0;
47433-
47434- /* detailed tx_errors */
47435- dev_stats->tx_aborted_errors = 0;
47436- dev_stats->tx_carrier_errors = 0;
47437- dev_stats->tx_fifo_errors = 0;
47438- dev_stats->tx_heartbeat_errors = 0;
47439- dev_stats->tx_window_errors = 0;
47440+ dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
47441+ drvs->rx_input_fifo_overflow_drop +
47442+ drvs->rx_drops_no_pbuf;
47443 }
47444
47445-void be_link_status_update(struct be_adapter *adapter, bool link_up)
47446+void be_link_status_update(struct be_adapter *adapter, int link_status)
47447 {
47448 struct net_device *netdev = adapter->netdev;
47449
47450 /* If link came up or went down */
47451- if (adapter->link_up != link_up) {
47452- if (link_up) {
47453+ if (adapter->link_status != link_status) {
47454+ adapter->link_speed = -1;
47455+ if (link_status == LINK_UP) {
47456 netif_start_queue(netdev);
47457 netif_carrier_on(netdev);
47458 printk(KERN_INFO "%s: Link up\n", netdev->name);
47459@@ -222,15 +491,15 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
47460 netif_carrier_off(netdev);
47461 printk(KERN_INFO "%s: Link down\n", netdev->name);
47462 }
47463- adapter->link_up = link_up;
47464+ adapter->link_status = link_status;
47465 }
47466 }
47467
47468 /* Update the EQ delay n BE based on the RX frags consumed / sec */
47469-static void be_rx_eqd_update(struct be_adapter *adapter)
47470+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
47471 {
47472- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47473- struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
47474+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
47475+ struct be_rx_stats *stats = &rxo->stats;
47476 ulong now = jiffies;
47477 u32 eqd;
47478
47479@@ -247,19 +516,17 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
47480 if ((now - stats->rx_fps_jiffies) < HZ)
47481 return;
47482
47483- stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
47484+ stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
47485 ((now - stats->rx_fps_jiffies) / HZ);
47486
47487 stats->rx_fps_jiffies = now;
47488- stats->be_prev_rx_frags = stats->be_rx_frags;
47489- eqd = stats->be_rx_fps / 110000;
47490+ stats->prev_rx_frags = stats->rx_frags;
47491+ eqd = stats->rx_fps / 110000;
47492 eqd = eqd << 3;
47493 if (eqd > rx_eq->max_eqd)
47494 eqd = rx_eq->max_eqd;
47495 if (eqd < rx_eq->min_eqd)
47496 eqd = rx_eq->min_eqd;
47497- if (eqd < 10)
47498- eqd = 0;
47499 if (eqd != rx_eq->cur_eqd)
47500 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
47501
47502@@ -270,7 +537,7 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
47503 {
47504 struct be_adapter *adapter = netdev_priv(dev);
47505
47506- return &adapter->stats.net_stats;
47507+ return &adapter->net_stats;
47508 }
47509
47510 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47511@@ -284,9 +551,9 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47512 return rate;
47513 }
47514
47515-static void be_tx_rate_update(struct be_adapter *adapter)
47516+static void be_tx_rate_update(struct be_tx_obj *txo)
47517 {
47518- struct be_drvr_stats *stats = drvr_stats(adapter);
47519+ struct be_tx_stats *stats = tx_stats(txo);
47520 ulong now = jiffies;
47521
47522 /* Wrapped around? */
47523@@ -305,10 +572,11 @@ static void be_tx_rate_update(struct be_adapter *adapter)
47524 }
47525 }
47526
47527-static void be_tx_stats_update(struct be_adapter *adapter,
47528+static void be_tx_stats_update(struct be_tx_obj *txo,
47529 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
47530 {
47531- struct be_drvr_stats *stats = drvr_stats(adapter);
47532+ struct be_tx_stats *stats = tx_stats(txo);
47533+
47534 stats->be_tx_reqs++;
47535 stats->be_tx_wrbs += wrb_cnt;
47536 stats->be_tx_bytes += copied;
47537@@ -318,7 +586,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
47538 }
47539
47540 /* Determine number of WRB entries needed to xmit data in an skb */
47541-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47542+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
47543+ bool *dummy)
47544 {
47545 int cnt = (skb->len > skb->data_len);
47546
47547@@ -326,12 +595,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47548
47549 /* to account for hdr wrb */
47550 cnt++;
47551- if (cnt & 1) {
47552+ if (lancer_chip(adapter) || !(cnt & 1)) {
47553+ *dummy = false;
47554+ } else {
47555 /* add a dummy to make it an even num */
47556 cnt++;
47557 *dummy = true;
47558- } else
47559- *dummy = false;
47560+ }
47561 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
47562 return cnt;
47563 }
47564@@ -343,17 +613,31 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
47565 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
47566 }
47567
47568-static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47569- bool vlan, u32 wrb_cnt, u32 len)
47570+static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
47571+ struct sk_buff *skb, u32 wrb_cnt, u32 len)
47572 {
47573+ u16 vlan_tag = 0;
47574+
47575 memset(hdr, 0, sizeof(*hdr));
47576
47577 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
47578
47579- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
47580+ if (skb_is_gso(skb)) {
47581 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
47582 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
47583 hdr, skb_shinfo(skb)->gso_size);
47584+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
47585+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
47586+
47587+ if (lancer_A0_chip(adapter)) {
47588+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
47589+ if (is_tcp_pkt(skb))
47590+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47591+ tcpcs, hdr, 1);
47592+ else if (is_udp_pkt(skb))
47593+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47594+ udpcs, hdr, 1);
47595+ }
47596 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
47597 if (is_tcp_pkt(skb))
47598 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
47599@@ -361,10 +645,10 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
47601 }
47602
47603- if (vlan && vlan_tx_tag_present(skb)) {
47604+ if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
47605 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
47606- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
47607- hdr, vlan_tx_tag_get(skb));
47608+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
47609+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
47610 }
47611
47612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
47613@@ -374,14 +658,13 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47614 }
47615
47616
47617-static int make_tx_wrbs(struct be_adapter *adapter,
47618+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
47619 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
47620 {
47621- u64 busaddr;
47622- u32 i, copied = 0;
47623+ dma_addr_t busaddr;
47624+ int i, copied = 0;
47625 struct pci_dev *pdev = adapter->pdev;
47626 struct sk_buff *first_skb = skb;
47627- struct be_queue_info *txq = &adapter->tx_obj.q;
47628 struct be_eth_wrb *wrb;
47629 struct be_eth_hdr_wrb *hdr;
47630
47631@@ -389,15 +672,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47632 atomic_add(wrb_cnt, &txq->used);
47633 queue_head_inc(txq);
47634
47635- if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
47636- dev_err(&pdev->dev, "TX DMA mapping failed\n");
47637- return 0;
47638- }
47639-
47640 if (skb->len > skb->data_len) {
47641- int len = skb->len - skb->data_len;
47642+ int len = skb_headlen(skb);
47643+ busaddr = pci_map_single(pdev, skb->data, len,
47644+ PCI_DMA_TODEVICE);
47645 wrb = queue_head_node(txq);
47646- busaddr = skb_shinfo(skb)->dma_head;
47647 wrb_fill(wrb, busaddr, len);
47648 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47649 queue_head_inc(txq);
47650@@ -407,8 +686,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47651 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
47652 struct skb_frag_struct *frag =
47653 &skb_shinfo(skb)->frags[i];
47654-
47655- busaddr = skb_shinfo(skb)->dma_maps[i];
47656+ busaddr = pci_map_page(pdev, frag->page,
47657+ frag->page_offset,
47658+ frag->size, PCI_DMA_TODEVICE);
47659 wrb = queue_head_node(txq);
47660 wrb_fill(wrb, busaddr, frag->size);
47661 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47662@@ -423,8 +703,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47663 queue_head_inc(txq);
47664 }
47665
47666- wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
47667- wrb_cnt, copied);
47668+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
47669 be_dws_cpu_to_le(hdr, sizeof(*hdr));
47670
47671 return copied;
47672@@ -434,19 +713,70 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47673 struct net_device *netdev)
47674 {
47675 struct be_adapter *adapter = netdev_priv(netdev);
47676- struct be_tx_obj *tx_obj = &adapter->tx_obj;
47677- struct be_queue_info *txq = &tx_obj->q;
47678+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
47679+ struct be_queue_info *txq = &txo->q;
47680 u32 wrb_cnt = 0, copied = 0;
47681 u32 start = txq->head;
47682 bool dummy_wrb, stopped = false;
47683
47684- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
47685+ if (unlikely((skb_shinfo(skb)->gso_segs > 1) &&
47686+ skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb))) {
47687+ tx_stats(txo)->be_ipv6_ext_hdr_tx_drop++;
47688+ goto tx_drop;
47689+ }
47690
47691- copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
47692+ /* If the skb is a large pkt forwarded to this interface
47693+ * after being LRO'd on another interface, drop the pkt.
47694+ * HW cannot handle such pkts. LRO must be disabled when
47695+ * using the server as a router.
47696+ */
47697+ if (!skb_is_gso(skb)) {
47698+ int eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
47699+ VLAN_ETH_HLEN : ETH_HLEN;
47700+
47701+ if ((skb->len - eth_hdr_len) > adapter->netdev->mtu)
47702+ goto tx_drop;
47703+ }
47704+
47705+ /* The ASIC is calculating checksum for Vlan tagged pkts
47706+ * though CSO is disabled.
47707+ * To work around this, insert the Vlan tag in the driver
47708+ * and donot set the vlan bit, cso bit in the Tx WRB.
47709+ */
47710+ if (unlikely(vlan_tx_tag_present(skb) &&
47711+ ((skb->ip_summed != CHECKSUM_PARTIAL) || (skb->len <= 60)))) {
47712+ /* Bug 28694: Don't embed the host VLAN tag in SKB
47713+ * when UMC mode enabled on that interface
47714+ */
47715+ if (!(adapter->function_mode & UMC_ENABLED)) {
47716+ skb = skb_share_check(skb, GFP_ATOMIC);
47717+ if (unlikely(!skb))
47718+ goto tx_drop;
47719+
47720+ skb = be_vlan_put_tag(skb,
47721+ be_get_tx_vlan_tag(adapter, skb));
47722+ if (unlikely(!skb))
47723+ goto tx_drop;
47724+
47725+ be_reset_skb_tx_vlan(skb);
47726+ }
47727+ }
47728+
47729+ /* Bug 12422: the stack can send us skbs with length more than 65535
47730+ * BE cannot handle such requests. Hack the extra data out and drop it.
47731+ */
47732+ if (skb->len > 65535) {
47733+ int err = __pskb_trim(skb, 65535);
47734+ BUG_ON(err);
47735+ }
47736+
47737+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
47738+
47739+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
47740 if (copied) {
47741 /* record the sent skb in the sent_skb table */
47742- BUG_ON(tx_obj->sent_skb_list[start]);
47743- tx_obj->sent_skb_list[start] = skb;
47744+ BUG_ON(txo->sent_skb_list[start]);
47745+ txo->sent_skb_list[start] = skb;
47746
47747 /* Ensure txq has space for the next skb; Else stop the queue
47748 * *BEFORE* ringing the tx doorbell, so that we serialze the
47749@@ -454,16 +784,21 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47750 */
47751 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
47752 txq->len) {
47753- netif_stop_queue(netdev);
47754+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
47755 stopped = true;
47756 }
47757
47758 be_txq_notify(adapter, txq->id, wrb_cnt);
47759
47760- be_tx_stats_update(adapter, wrb_cnt, copied,
47761+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
47762+ netdev->trans_start = jiffies;
47763+#endif
47764+
47765+ be_tx_stats_update(txo, wrb_cnt, copied,
47766 skb_shinfo(skb)->gso_segs, stopped);
47767 } else {
47768 txq->head = start;
47769+tx_drop:
47770 dev_kfree_skb_any(skb);
47771 }
47772 return NETDEV_TX_OK;
47773@@ -473,10 +808,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47774 {
47775 struct be_adapter *adapter = netdev_priv(netdev);
47776 if (new_mtu < BE_MIN_MTU ||
47777- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
47778+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
47779+ (ETH_HLEN + ETH_FCS_LEN))) {
47780 dev_info(&adapter->pdev->dev,
47781 "MTU must be between %d and %d bytes\n",
47782- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
47783+ BE_MIN_MTU,
47784+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
47785 return -EINVAL;
47786 }
47787 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
47788@@ -486,17 +823,19 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47789 }
47790
47791 /*
47792- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
47793- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
47794- * set the BE in promiscuous VLAN mode.
47795+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
47796+ * If the user configures more, place BE in vlan promiscuous mode.
47797 */
47798-static int be_vid_config(struct be_adapter *adapter)
47799+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
47800 {
47801 u16 vtag[BE_NUM_VLANS_SUPPORTED];
47802 u16 ntags = 0, i;
47803- int status;
47804+ int status = 0;
47805
47806- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
47807+ /* No need to change the VLAN state if the I/F is in promiscous */
47808+ if (adapter->promiscuous)
47809+ return 0;
47810+ if (adapter->vlans_added <= adapter->max_vlans) {
47811 /* Construct VLAN Table to give to HW */
47812 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
47813 if (adapter->vlan_tag[i]) {
47814@@ -504,47 +843,46 @@ static int be_vid_config(struct be_adapter *adapter)
47815 ntags++;
47816 }
47817 }
47818- status = be_cmd_vlan_config(adapter, adapter->if_handle,
47819- vtag, ntags, 1, 0);
47820+ /* Send command only if there is something to be programmed */
47821+ if (ntags)
47822+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
47823+ vtag, ntags, 1, 0);
47824 } else {
47825 status = be_cmd_vlan_config(adapter, adapter->if_handle,
47826- NULL, 0, 1, 1);
47827+ NULL, 0, 1, 1);
47828 }
47829+
47830 return status;
47831 }
47832
47833 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
47834 {
47835 struct be_adapter *adapter = netdev_priv(netdev);
47836- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47837- struct be_eq_obj *tx_eq = &adapter->tx_eq;
47838
47839- be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
47840- be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
47841 adapter->vlan_grp = grp;
47842- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
47843- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
47844 }
47845
47846 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
47847 {
47848 struct be_adapter *adapter = netdev_priv(netdev);
47849
47850- adapter->num_vlans++;
47851+ adapter->vlans_added++;
47852+
47853 adapter->vlan_tag[vid] = 1;
47854-
47855- be_vid_config(adapter);
47856+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
47857+ be_vid_config(adapter, false, 0);
47858 }
47859
47860 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
47861 {
47862 struct be_adapter *adapter = netdev_priv(netdev);
47863
47864- adapter->num_vlans--;
47865- adapter->vlan_tag[vid] = 0;
47866-
47867+ adapter->vlans_added--;
47868 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
47869- be_vid_config(adapter);
47870+
47871+ adapter->vlan_tag[vid] = 0;
47872+ if (adapter->vlans_added <= adapter->max_vlans)
47873+ be_vid_config(adapter, false, 0);
47874 }
47875
47876 static void be_set_multicast_list(struct net_device *netdev)
47877@@ -552,7 +890,7 @@ static void be_set_multicast_list(struct net_device *netdev)
47878 struct be_adapter *adapter = netdev_priv(netdev);
47879
47880 if (netdev->flags & IFF_PROMISC) {
47881- be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
47882+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
47883 adapter->promiscuous = true;
47884 goto done;
47885 }
47886@@ -560,81 +898,244 @@ static void be_set_multicast_list(struct net_device *netdev)
47887 /* BE was previously in promiscous mode; disable it */
47888 if (adapter->promiscuous) {
47889 adapter->promiscuous = false;
47890- be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
47891+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
47892+
47893+ if (adapter->vlans_added)
47894+ be_vid_config(adapter, false, 0);
47895 }
47896
47897- if (netdev->flags & IFF_ALLMULTI) {
47898- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
47899+ /* Enable multicast promisc if num configured exceeds what we support */
47900+ if (netdev->flags & IFF_ALLMULTI ||
47901+ netdev_mc_count(netdev) > BE_MAX_MC) {
47902+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
47903 goto done;
47904 }
47905
47906- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
47907- netdev->mc_count);
47908+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
47909 done:
47910 return;
47911 }
47912
47913-static void be_rx_rate_update(struct be_adapter *adapter)
47914+#ifdef HAVE_SRIOV_CONFIG
47915+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
47916 {
47917- struct be_drvr_stats *stats = drvr_stats(adapter);
47918+ struct be_adapter *adapter = netdev_priv(netdev);
47919+ int status;
47920+
47921+ if (adapter->num_vfs == 0)
47922+ return -EPERM;
47923+
47924+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
47925+ return -EINVAL;
47926+
47927+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
47928+ status = be_cmd_pmac_del(adapter,
47929+ adapter->vf_cfg[vf].vf_if_handle,
47930+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47931+
47932+ status = be_cmd_pmac_add(adapter, mac,
47933+ adapter->vf_cfg[vf].vf_if_handle,
47934+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47935+
47936+ if (status)
47937+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
47938+ mac, vf);
47939+ else
47940+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
47941+
47942+ return status;
47943+}
47944+
47945+static int be_get_vf_config(struct net_device *netdev, int vf,
47946+ struct ifla_vf_info *vi)
47947+{
47948+ struct be_adapter *adapter = netdev_priv(netdev);
47949+
47950+ if (adapter->num_vfs == 0)
47951+ return -EPERM;
47952+
47953+ if (vf >= adapter->num_vfs)
47954+ return -EINVAL;
47955+
47956+ vi->vf = vf;
47957+ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
47958+ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag & VLAN_VID_MASK;
47959+ vi->qos = adapter->vf_cfg[vf].vf_vlan_tag >> VLAN_PRIO_SHIFT;
47960+ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
47961+
47962+ return 0;
47963+}
47964+
47965+/*
47966+ * Entry point to configure vlan behavior for a VF.
47967+ * 1. By default a VF is vlan Challenged.
47968+ * 2. It may or may not have Transparent Tagging enabled.
47969+ * 3. Vlan privilege for a VF can be toggled using special VID 4095.
47970+ * 4. When removing the Vlan privilege for a VF there is no need set default vid
47971+ * 5. Transparent Tagging configured for a VF resets its Vlan privilege
47972+ * 6. To disable the current Transparet Tagging for a VF:
47973+ * 6a. run the last iproute command with vlan set to 0.
47974+ * 6b. programing the default vid will disable Transparent Tagging in ARM/ASIC
47975+ */
47976+static int be_set_vf_vlan(struct net_device *netdev,
47977+ int vf, u16 vlan, u8 qos)
47978+{
47979+ struct be_adapter *adapter = netdev_priv(netdev);
47980+ int status = 0;
47981+ u32 en = 0;
47982+
47983+ if (adapter->num_vfs == 0)
47984+ return -EPERM;
47985+
47986+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
47987+ return -EINVAL;
47988+
47989+ status = be_cmd_get_fn_privileges(adapter, &en, vf + 1);
47990+ if (status)
47991+ goto sts;
47992+
47993+ if (vlan == 4095) {
47994+ if (en & BE_PRIV_FILTMGMT) {
47995+ /* Knock off filtering privileges */
47996+ en &= ~BE_PRIV_FILTMGMT;
47997+ } else {
47998+ en |= BE_PRIV_FILTMGMT;
47999+ /* Transparent Tagging is currently enabled, Reset it */
48000+ if (adapter->vf_cfg[vf].vf_vlan_tag) {
48001+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
48002+ vlan = adapter->vf_cfg[vf].vf_def_vid;
48003+ be_cmd_set_hsw_config(adapter, vlan, vf + 1,
48004+ adapter->vf_cfg[vf].vf_if_handle);
48005+ }
48006+ }
48007+
48008+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
48009+ status = be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
48010+
48011+ goto sts;
48012+ }
48013+
48014+ if (vlan || qos) {
48015+ if (en & BE_PRIV_FILTMGMT) {
48016+ /* Check privilege and reset it to default */
48017+ en &= ~BE_PRIV_FILTMGMT;
48018+ be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
48019+ }
48020+
48021+ vlan |= qos << VLAN_PRIO_SHIFT;
48022+ if (adapter->vf_cfg[vf].vf_vlan_tag != vlan) {
48023+ /* If this is new value, program it. Else skip. */
48024+ adapter->vf_cfg[vf].vf_vlan_tag = vlan;
48025+
48026+ status = be_cmd_set_hsw_config(adapter, vlan,
48027+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
48028+ }
48029+
48030+ } else {
48031+ /* Reset Transparent Vlan Tagging. */
48032+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
48033+ vlan = adapter->vf_cfg[vf].vf_def_vid;
48034+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
48035+ adapter->vf_cfg[vf].vf_if_handle);
48036+ }
48037+
48038+sts:
48039+ if (status)
48040+ dev_info(&adapter->pdev->dev,
48041+ "VLAN %d config on VF %d failed\n", vlan, vf);
48042+ return status;
48043+}
48044+
48045+static int be_set_vf_tx_rate(struct net_device *netdev,
48046+ int vf, int rate)
48047+{
48048+ struct be_adapter *adapter = netdev_priv(netdev);
48049+ int status = 0;
48050+
48051+ if (adapter->num_vfs == 0)
48052+ return -EPERM;
48053+
48054+ if ((vf >= adapter->num_vfs) || (rate < 0))
48055+ return -EINVAL;
48056+
48057+ if (rate > 10000)
48058+ rate = 10000;
48059+
48060+ adapter->vf_cfg[vf].vf_tx_rate = rate;
48061+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
48062+
48063+ if (status)
48064+ dev_info(&adapter->pdev->dev,
48065+ "tx rate %d on VF %d failed\n", rate, vf);
48066+ return status;
48067+}
48068+#endif /* HAVE_SRIOV_CONFIG */
48069+
48070+static void be_rx_rate_update(struct be_rx_obj *rxo)
48071+{
48072+ struct be_rx_stats *stats = &rxo->stats;
48073 ulong now = jiffies;
48074
48075 /* Wrapped around */
48076- if (time_before(now, stats->be_rx_jiffies)) {
48077- stats->be_rx_jiffies = now;
48078+ if (time_before(now, stats->rx_jiffies)) {
48079+ stats->rx_jiffies = now;
48080 return;
48081 }
48082
48083 /* Update the rate once in two seconds */
48084- if ((now - stats->be_rx_jiffies) < 2 * HZ)
48085+ if ((now - stats->rx_jiffies) < 2 * HZ)
48086 return;
48087
48088- stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
48089- - stats->be_rx_bytes_prev,
48090- now - stats->be_rx_jiffies);
48091- stats->be_rx_jiffies = now;
48092- stats->be_rx_bytes_prev = stats->be_rx_bytes;
48093+ stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
48094+ now - stats->rx_jiffies);
48095+ stats->rx_jiffies = now;
48096+ stats->rx_bytes_prev = stats->rx_bytes;
48097 }
48098
48099-static void be_rx_stats_update(struct be_adapter *adapter,
48100- u32 pktsize, u16 numfrags)
48101+static void be_rx_stats_update(struct be_rx_obj *rxo,
48102+ struct be_rx_compl_info *rxcp)
48103 {
48104- struct be_drvr_stats *stats = drvr_stats(adapter);
48105+ struct be_rx_stats *stats = &rxo->stats;
48106
48107- stats->be_rx_compl++;
48108- stats->be_rx_frags += numfrags;
48109- stats->be_rx_bytes += pktsize;
48110- stats->be_rx_pkts++;
48111+ stats->rx_compl++;
48112+ stats->rx_frags += rxcp->num_rcvd;
48113+ stats->rx_bytes += rxcp->pkt_size;
48114+ stats->rx_pkts++;
48115+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
48116+ stats->rx_mcast_pkts++;
48117+ if (rxcp->err)
48118+ stats->rxcp_err++;
48119 }
48120
48121-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
48122+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
48123 {
48124- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
48125-
48126- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
48127- ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
48128- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
48129- if (ip_version) {
48130- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
48131- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
48132- }
48133- ipv6_chk = (ip_version && (tcpf || udpf));
48134-
48135- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
48136+ /* L4 checksum is not reliable for non TCP/UDP packets.
48137+ * Also ignore ipcksm for ipv6 pkts */
48138+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
48139+ (rxcp->ip_csum || rxcp->ipv6);
48140 }
48141
48142 static struct be_rx_page_info *
48143-get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48144+get_rx_page_info(struct be_adapter *adapter, struct be_rx_obj *rxo,
48145+ u16 frag_idx)
48146 {
48147 struct be_rx_page_info *rx_page_info;
48148- struct be_queue_info *rxq = &adapter->rx_obj.q;
48149+ struct be_queue_info *rxq = &rxo->q;
48150
48151- rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
48152- BUG_ON(!rx_page_info->page);
48153+ rx_page_info = &rxo->page_info_tbl[frag_idx];
48154+ if (!rx_page_info->page) {
48155+ printk(KERN_EMERG "curr_idx=%d prev_dix=%d rxq->head=%d\n",
48156+ frag_idx, rxo->prev_frag_idx, rxq->head);
48157+ BUG_ON(!rx_page_info->page);
48158+ }
48159
48160- if (rx_page_info->last_page_user)
48161+ if (rx_page_info->last_page_user) {
48162 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
48163 adapter->big_page_size, PCI_DMA_FROMDEVICE);
48164+ rx_page_info->last_page_user = false;
48165+ }
48166+
48167+ rxo->prev_frag_idx = frag_idx;
48168
48169 atomic_dec(&rxq->used);
48170 return rx_page_info;
48171@@ -642,20 +1143,26 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48172
48173 /* Throwaway the data in the Rx completion */
48174 static void be_rx_compl_discard(struct be_adapter *adapter,
48175- struct be_eth_rx_compl *rxcp)
48176+ struct be_rx_obj *rxo,
48177+ struct be_rx_compl_info *rxcp)
48178 {
48179- struct be_queue_info *rxq = &adapter->rx_obj.q;
48180+ struct be_queue_info *rxq = &rxo->q;
48181 struct be_rx_page_info *page_info;
48182- u16 rxq_idx, i, num_rcvd;
48183+ u16 i;
48184+ bool oob_error;
48185+ u16 num_rcvd = rxcp->num_rcvd;
48186
48187- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48188- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48189+ oob_error = lancer_A0_chip(adapter) && rxcp->err;
48190+
48191+ /* In case of OOB error num_rcvd will be 1 more than actual */
48192+ if (oob_error && num_rcvd)
48193+ num_rcvd -= 1;
48194
48195 for (i = 0; i < num_rcvd; i++) {
48196- page_info = get_rx_page_info(adapter, rxq_idx);
48197+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48198 put_page(page_info->page);
48199 memset(page_info, 0, sizeof(*page_info));
48200- index_inc(&rxq_idx, rxq->len);
48201+ index_inc(&rxcp->rxq_idx, rxq->len);
48202 }
48203 }
48204
48205@@ -663,29 +1170,24 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
48206 * skb_fill_rx_data forms a complete skb for an ether frame
48207 * indicated by rxcp.
48208 */
48209-static void skb_fill_rx_data(struct be_adapter *adapter,
48210- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
48211+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
48212+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
48213 {
48214- struct be_queue_info *rxq = &adapter->rx_obj.q;
48215+ struct be_queue_info *rxq = &rxo->q;
48216 struct be_rx_page_info *page_info;
48217- u16 rxq_idx, i, num_rcvd, j;
48218- u32 pktsize, hdr_len, curr_frag_len, size;
48219+ u16 i, j;
48220+ u16 hdr_len, curr_frag_len, remaining;
48221 u8 *start;
48222
48223- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48224- pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48225- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48226-
48227- page_info = get_rx_page_info(adapter, rxq_idx);
48228-
48229+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48230 start = page_address(page_info->page) + page_info->page_offset;
48231 prefetch(start);
48232
48233 /* Copy data in the first descriptor of this completion */
48234- curr_frag_len = min(pktsize, rx_frag_size);
48235+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
48236
48237 /* Copy the header portion into skb_data */
48238- hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
48239+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
48240 memcpy(skb->data, start, hdr_len);
48241 skb->len = curr_frag_len;
48242 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
48243@@ -702,21 +1204,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48244 skb->data_len = curr_frag_len - hdr_len;
48245 skb->tail += hdr_len;
48246 }
48247- memset(page_info, 0, sizeof(*page_info));
48248+ page_info->page = NULL;
48249
48250- if (pktsize <= rx_frag_size) {
48251- BUG_ON(num_rcvd != 1);
48252- goto done;
48253+ if (rxcp->pkt_size <= rx_frag_size) {
48254+ BUG_ON(rxcp->num_rcvd != 1);
48255+ return;
48256 }
48257
48258 /* More frags present for this completion */
48259- size = pktsize;
48260- for (i = 1, j = 0; i < num_rcvd; i++) {
48261- size -= curr_frag_len;
48262- index_inc(&rxq_idx, rxq->len);
48263- page_info = get_rx_page_info(adapter, rxq_idx);
48264-
48265- curr_frag_len = min(size, rx_frag_size);
48266+ index_inc(&rxcp->rxq_idx, rxq->len);
48267+ remaining = rxcp->pkt_size - curr_frag_len;
48268+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
48269+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48270+ curr_frag_len = min(remaining, rx_frag_size);
48271
48272 /* Coalesce all frags from the same physical page in one slot */
48273 if (page_info->page_offset == 0) {
48274@@ -735,99 +1235,122 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48275 skb->len += curr_frag_len;
48276 skb->data_len += curr_frag_len;
48277
48278- memset(page_info, 0, sizeof(*page_info));
48279+ remaining -= curr_frag_len;
48280+ index_inc(&rxcp->rxq_idx, rxq->len);
48281+ page_info->page = NULL;
48282 }
48283 BUG_ON(j > MAX_SKB_FRAGS);
48284-
48285-done:
48286- be_rx_stats_update(adapter, pktsize, num_rcvd);
48287- return;
48288 }
48289
48290-/* Process the RX completion indicated by rxcp when GRO is disabled */
48291+/* Process the RX completion indicated by rxcp when LRO is disabled */
48292 static void be_rx_compl_process(struct be_adapter *adapter,
48293- struct be_eth_rx_compl *rxcp)
48294+ struct be_rx_obj *rxo,
48295+ struct be_rx_compl_info *rxcp)
48296 {
48297 struct sk_buff *skb;
48298- u32 vlanf, vid;
48299- u8 vtm;
48300
48301- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48302- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48303-
48304- /* vlanf could be wrongly set in some cards.
48305- * ignore if vtm is not set */
48306- if ((adapter->cap == 0x400) && !vtm)
48307- vlanf = 0;
48308-
48309- skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
48310- if (!skb) {
48311+ skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
48312+ if (unlikely(!skb)) {
48313 if (net_ratelimit())
48314 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
48315- be_rx_compl_discard(adapter, rxcp);
48316+ be_rx_compl_discard(adapter, rxo, rxcp);
48317 return;
48318 }
48319
48320- skb_reserve(skb, NET_IP_ALIGN);
48321+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
48322
48323- skb_fill_rx_data(adapter, skb, rxcp);
48324-
48325- if (do_pkt_csum(rxcp, adapter->rx_csum))
48326- skb->ip_summed = CHECKSUM_NONE;
48327- else
48328+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
48329 skb->ip_summed = CHECKSUM_UNNECESSARY;
48330+ else
48331+ skb->ip_summed = CHECKSUM_NONE;
48332
48333 skb->truesize = skb->len + sizeof(struct sk_buff);
48334+ if (unlikely(rxcp->vlanf) &&
48335+ unlikely(!vlan_configured(adapter))) {
48336+ __vlan_put_tag(skb, rxcp->vlan_tag);
48337+ }
48338 skb->protocol = eth_type_trans(skb, adapter->netdev);
48339 skb->dev = adapter->netdev;
48340
48341- if (vlanf) {
48342- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
48343- kfree_skb(skb);
48344- return;
48345- }
48346- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48347- vid = be16_to_cpu(vid);
48348- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
48349- } else {
48350+ if (unlikely(rxcp->vlanf) &&
48351+ vlan_configured(adapter))
48352+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
48353+ rxcp->vlan_tag);
48354+ else
48355 netif_receive_skb(skb);
48356+
48357+ return;
48358+}
48359+
48360+/* Process the RX completion indicated by rxcp when LRO is enabled */
48361+static void be_rx_compl_process_lro(struct be_adapter *adapter,
48362+ struct be_rx_obj *rxo,
48363+ struct be_rx_compl_info *rxcp)
48364+{
48365+ struct be_rx_page_info *page_info;
48366+ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
48367+ struct be_queue_info *rxq = &rxo->q;
48368+ u16 remaining, curr_frag_len;
48369+ u16 i, j;
48370+
48371+ remaining = rxcp->pkt_size;
48372+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48373+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48374+
48375+ curr_frag_len = min(remaining, rx_frag_size);
48376+
48377+ /* Coalesce all frags from the same physical page in one slot */
48378+ if (i == 0 || page_info->page_offset == 0) {
48379+ /* First frag or Fresh page */
48380+ j++;
48381+ rx_frags[j].page = page_info->page;
48382+ rx_frags[j].page_offset = page_info->page_offset;
48383+ rx_frags[j].size = 0;
48384+ } else {
48385+ put_page(page_info->page);
48386+ }
48387+ rx_frags[j].size += curr_frag_len;
48388+
48389+ remaining -= curr_frag_len;
48390+ index_inc(&rxcp->rxq_idx, rxq->len);
48391+ memset(page_info, 0, sizeof(*page_info));
48392+ }
48393+ BUG_ON(j > MAX_SKB_FRAGS);
48394+
48395+ if (likely(!rxcp->vlanf)) {
48396+ lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size,
48397+ rxcp->pkt_size, NULL, 0);
48398+ } else {
48399+ lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags,
48400+ rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp,
48401+ rxcp->vlan_tag, NULL, 0);
48402 }
48403
48404 return;
48405 }
48406
48407 /* Process the RX completion indicated by rxcp when GRO is enabled */
48408-static void be_rx_compl_process_gro(struct be_adapter *adapter,
48409- struct be_eth_rx_compl *rxcp)
48410+void be_rx_compl_process_gro(struct be_adapter *adapter,
48411+ struct be_rx_obj *rxo,
48412+ struct be_rx_compl_info *rxcp)
48413 {
48414+#ifdef NETIF_F_GRO
48415 struct be_rx_page_info *page_info;
48416 struct sk_buff *skb = NULL;
48417- struct be_queue_info *rxq = &adapter->rx_obj.q;
48418- struct be_eq_obj *eq_obj = &adapter->rx_eq;
48419- u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
48420- u16 i, rxq_idx = 0, vid, j;
48421- u8 vtm;
48422-
48423- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48424- pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48425- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48426- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48427- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48428-
48429- /* vlanf could be wrongly set in some cards.
48430- * ignore if vtm is not set */
48431- if ((adapter->cap == 0x400) && !vtm)
48432- vlanf = 0;
48433+ struct be_queue_info *rxq = &rxo->q;
48434+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
48435+ u16 remaining, curr_frag_len;
48436+ u16 i, j;
48437
48438 skb = napi_get_frags(&eq_obj->napi);
48439 if (!skb) {
48440- be_rx_compl_discard(adapter, rxcp);
48441+ be_rx_compl_discard(adapter, rxo, rxcp);
48442 return;
48443 }
48444
48445- remaining = pkt_size;
48446- for (i = 0, j = -1; i < num_rcvd; i++) {
48447- page_info = get_rx_page_info(adapter, rxq_idx);
48448+ remaining = rxcp->pkt_size;
48449+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48450+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48451
48452 curr_frag_len = min(remaining, rx_frag_size);
48453
48454@@ -845,55 +1368,129 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
48455 skb_shinfo(skb)->frags[j].size += curr_frag_len;
48456
48457 remaining -= curr_frag_len;
48458- index_inc(&rxq_idx, rxq->len);
48459+ index_inc(&rxcp->rxq_idx, rxq->len);
48460 memset(page_info, 0, sizeof(*page_info));
48461 }
48462 BUG_ON(j > MAX_SKB_FRAGS);
48463
48464 skb_shinfo(skb)->nr_frags = j + 1;
48465- skb->len = pkt_size;
48466- skb->data_len = pkt_size;
48467- skb->truesize += pkt_size;
48468+ skb->len = rxcp->pkt_size;
48469+ skb->data_len = rxcp->pkt_size;
48470+ skb->truesize += rxcp->pkt_size;
48471 skb->ip_summed = CHECKSUM_UNNECESSARY;
48472
48473- if (likely(!vlanf)) {
48474+ if (likely(!rxcp->vlanf))
48475 napi_gro_frags(&eq_obj->napi);
48476- } else {
48477- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48478- vid = be16_to_cpu(vid);
48479+ else
48480+ vlan_gro_frags(&eq_obj->napi,
48481+ adapter->vlan_grp, rxcp->vlan_tag);
48482+#endif
48483
48484- if (!adapter->vlan_grp || adapter->num_vlans == 0)
48485- return;
48486-
48487- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
48488- }
48489-
48490- be_rx_stats_update(adapter, pkt_size, num_rcvd);
48491 return;
48492 }
48493
48494-static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
48495+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
48496+ struct be_eth_rx_compl *compl,
48497+ struct be_rx_compl_info *rxcp)
48498 {
48499- struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
48500+ rxcp->pkt_size =
48501+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
48502+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
48503+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
48504+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
48505+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
48506+ rxcp->ip_csum =
48507+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
48508+ rxcp->l4_csum =
48509+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
48510+ rxcp->ipv6 =
48511+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
48512+ rxcp->rxq_idx =
48513+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
48514+ rxcp->num_rcvd =
48515+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
48516+ rxcp->pkt_type =
48517+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
48518+ if (rxcp->vlanf) {
48519+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
48520+ compl);
48521+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
48522+ vlan_tag, compl);
48523+ }
48524+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
48525+}
48526
48527- if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
48528+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
48529+ struct be_eth_rx_compl *compl,
48530+ struct be_rx_compl_info *rxcp)
48531+{
48532+ rxcp->pkt_size =
48533+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
48534+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
48535+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
48536+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
48537+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
48538+ rxcp->ip_csum =
48539+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
48540+ rxcp->l4_csum =
48541+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
48542+ rxcp->ipv6 =
48543+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
48544+ rxcp->rxq_idx =
48545+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
48546+ rxcp->num_rcvd =
48547+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
48548+ rxcp->pkt_type =
48549+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
48550+ if (rxcp->vlanf) {
48551+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
48552+ compl);
48553+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
48554+ vlan_tag, compl);
48555+ }
48556+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
48557+}
48558+
48559+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
48560+{
48561+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
48562+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
48563+ struct be_adapter *adapter = rxo->adapter;
48564+
48565+ /* For checking the valid bit it is Ok to use either definition as the
48566+ * valid bit is at the same position in both v0 and v1 Rx compl */
48567+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
48568 return NULL;
48569
48570- be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
48571+ rmb();
48572+ be_dws_le_to_cpu(compl, sizeof(*compl));
48573
48574- queue_tail_inc(&adapter->rx_obj.cq);
48575+ if (adapter->be3_native)
48576+ be_parse_rx_compl_v1(adapter, compl, rxcp);
48577+ else
48578+ be_parse_rx_compl_v0(adapter, compl, rxcp);
48579+
48580+ if (rxcp->vlanf) {
48581+ /* vlanf could be wrongly set in some cards.
48582+ * ignore if vtm is not set */
48583+ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
48584+ rxcp->vlanf = 0;
48585+
48586+ if (!lancer_chip(adapter))
48587+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
48588+
48589+ if ((adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK)) &&
48590+ !adapter->vlan_tag[rxcp->vlan_tag])
48591+ rxcp->vlanf = 0;
48592+ }
48593+
48594+ /* As the compl has been parsed, reset it; we wont touch it again */
48595+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
48596+
48597+ queue_tail_inc(&rxo->cq);
48598 return rxcp;
48599 }
48600
48601-/* To reset the valid bit, we need to reset the whole word as
48602- * when walking the queue the valid entries are little-endian
48603- * and invalid entries are host endian
48604- */
48605-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
48606-{
48607- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
48608-}
48609-
48610 static inline struct page *be_alloc_pages(u32 size)
48611 {
48612 gfp_t alloc_flags = GFP_ATOMIC;
48613@@ -907,11 +1504,12 @@ static inline struct page *be_alloc_pages(u32 size)
48614 * Allocate a page, split it to fragments of size rx_frag_size and post as
48615 * receive buffers to BE
48616 */
48617-static void be_post_rx_frags(struct be_adapter *adapter)
48618+static void be_post_rx_frags(struct be_rx_obj *rxo)
48619 {
48620- struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
48621- struct be_rx_page_info *page_info = NULL;
48622- struct be_queue_info *rxq = &adapter->rx_obj.q;
48623+ struct be_adapter *adapter = rxo->adapter;
48624+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
48625+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
48626+ struct be_queue_info *rxq = &rxo->q;
48627 struct page *pagep = NULL;
48628 struct be_eth_rx_d *rxd;
48629 u64 page_dmaaddr = 0, frag_dmaaddr;
48630@@ -922,7 +1520,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48631 if (!pagep) {
48632 pagep = be_alloc_pages(adapter->big_page_size);
48633 if (unlikely(!pagep)) {
48634- drvr_stats(adapter)->be_ethrx_post_fail++;
48635+ rxo->stats.rx_post_fail++;
48636 break;
48637 }
48638 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
48639@@ -941,7 +1539,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48640 rxd = queue_head_node(rxq);
48641 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
48642 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
48643- queue_head_inc(rxq);
48644
48645 /* Any space left in the current big page for another frag? */
48646 if ((page_offset + rx_frag_size + rx_frag_size) >
48647@@ -949,17 +1546,24 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48648 pagep = NULL;
48649 page_info->last_page_user = true;
48650 }
48651+
48652+ prev_page_info = page_info;
48653+ queue_head_inc(rxq);
48654 page_info = &page_info_tbl[rxq->head];
48655 }
48656 if (pagep)
48657- page_info->last_page_user = true;
48658+ prev_page_info->last_page_user = true;
48659
48660+ /* Ensure that posting buffers is the last thing done by this
48661+ * routine to avoid racing between rx bottom-half and
48662+ * be_worker (process) contexts.
48663+ */
48664 if (posted) {
48665 atomic_add(posted, &rxq->used);
48666 be_rxq_notify(adapter, rxq->id, posted);
48667 } else if (atomic_read(&rxq->used) == 0) {
48668 /* Let be_worker replenish when memory is available */
48669- adapter->rx_post_starved = true;
48670+ rxo->rx_post_starved = true;
48671 }
48672
48673 return;
48674@@ -972,6 +1576,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48675 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
48676 return NULL;
48677
48678+ rmb();
48679 be_dws_le_to_cpu(txcp, sizeof(*txcp));
48680
48681 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
48682@@ -980,11 +1585,14 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48683 return txcp;
48684 }
48685
48686-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48687+static u16 be_tx_compl_process(struct be_adapter *adapter,
48688+ struct be_tx_obj *txo, u16 last_index)
48689 {
48690- struct be_queue_info *txq = &adapter->tx_obj.q;
48691- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
48692+ struct be_queue_info *txq = &txo->q;
48693+ struct be_eth_wrb *wrb;
48694+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48695 struct sk_buff *sent_skb;
48696+ u64 busaddr;
48697 u16 cur_index, num_wrbs = 0;
48698
48699 cur_index = txq->tail;
48700@@ -992,15 +1600,31 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48701 BUG_ON(!sent_skb);
48702 sent_skbs[cur_index] = NULL;
48703
48704- do {
48705+ wrb = queue_tail_node(txq);
48706+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48707+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48708+ if (busaddr != 0) {
48709+ pci_unmap_single(adapter->pdev, busaddr,
48710+ wrb->frag_len, PCI_DMA_TODEVICE);
48711+ }
48712+ num_wrbs++;
48713+ queue_tail_inc(txq);
48714+
48715+ while (cur_index != last_index) {
48716 cur_index = txq->tail;
48717+ wrb = queue_tail_node(txq);
48718+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48719+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48720+ if (busaddr != 0) {
48721+ pci_unmap_page(adapter->pdev, busaddr,
48722+ wrb->frag_len, PCI_DMA_TODEVICE);
48723+ }
48724 num_wrbs++;
48725 queue_tail_inc(txq);
48726- } while (cur_index != last_index);
48727+ }
48728
48729- atomic_sub(num_wrbs, &txq->used);
48730- skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
48731 kfree_skb(sent_skb);
48732+ return num_wrbs;
48733 }
48734
48735 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48736@@ -1010,13 +1634,15 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48737 if (!eqe->evt)
48738 return NULL;
48739
48740+ rmb();
48741 eqe->evt = le32_to_cpu(eqe->evt);
48742 queue_tail_inc(&eq_obj->q);
48743 return eqe;
48744 }
48745
48746 static int event_handle(struct be_adapter *adapter,
48747- struct be_eq_obj *eq_obj)
48748+ struct be_eq_obj *eq_obj,
48749+ bool rearm)
48750 {
48751 struct be_eq_entry *eqe;
48752 u16 num = 0;
48753@@ -1029,7 +1655,10 @@ static int event_handle(struct be_adapter *adapter,
48754 /* Deal with any spurious interrupts that come
48755 * without events
48756 */
48757- be_eq_notify(adapter, eq_obj->q.id, true, true, num);
48758+ if (!num)
48759+ rearm = true;
48760+
48761+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
48762 if (num)
48763 napi_schedule(&eq_obj->napi);
48764
48765@@ -1053,49 +1682,55 @@ static void be_eq_clean(struct be_adapter *adapter,
48766 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
48767 }
48768
48769-static void be_rx_q_clean(struct be_adapter *adapter)
48770+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
48771 {
48772 struct be_rx_page_info *page_info;
48773- struct be_queue_info *rxq = &adapter->rx_obj.q;
48774- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
48775- struct be_eth_rx_compl *rxcp;
48776+ struct be_queue_info *rxq = &rxo->q;
48777+ struct be_queue_info *rx_cq = &rxo->cq;
48778+ struct be_rx_compl_info *rxcp;
48779 u16 tail;
48780
48781 /* First cleanup pending rx completions */
48782- while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
48783- be_rx_compl_discard(adapter, rxcp);
48784- be_rx_compl_reset(rxcp);
48785+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
48786+ be_rx_compl_discard(adapter, rxo, rxcp);
48787 be_cq_notify(adapter, rx_cq->id, true, 1);
48788 }
48789
48790 /* Then free posted rx buffer that were not used */
48791 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
48792 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
48793- page_info = get_rx_page_info(adapter, tail);
48794+ page_info = get_rx_page_info(adapter, rxo, tail);
48795 put_page(page_info->page);
48796 memset(page_info, 0, sizeof(*page_info));
48797 }
48798 BUG_ON(atomic_read(&rxq->used));
48799+ rxq->tail = rxq->head = 0;
48800 }
48801
48802-static void be_tx_compl_clean(struct be_adapter *adapter)
48803+static void be_tx_compl_clean(struct be_adapter *adapter,
48804+ struct be_tx_obj *txo)
48805 {
48806- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
48807- struct be_queue_info *txq = &adapter->tx_obj.q;
48808+ struct be_queue_info *tx_cq = &txo->cq;
48809+ struct be_queue_info *txq = &txo->q;
48810 struct be_eth_tx_compl *txcp;
48811- u16 end_idx, cmpl = 0, timeo = 0;
48812+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
48813+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48814+ struct sk_buff *sent_skb;
48815+ bool dummy_wrb;
48816
48817 /* Wait for a max of 200ms for all the tx-completions to arrive. */
48818 do {
48819 while ((txcp = be_tx_compl_get(tx_cq))) {
48820 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
48821 wrb_index, txcp);
48822- be_tx_compl_process(adapter, end_idx);
48823+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
48824 cmpl++;
48825 }
48826 if (cmpl) {
48827 be_cq_notify(adapter, tx_cq->id, false, cmpl);
48828+ atomic_sub(num_wrbs, &txq->used);
48829 cmpl = 0;
48830+ num_wrbs = 0;
48831 }
48832
48833 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
48834@@ -1107,6 +1742,17 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
48835 if (atomic_read(&txq->used))
48836 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
48837 atomic_read(&txq->used));
48838+
48839+ /* free posted tx for which compls will never arrive */
48840+ while (atomic_read(&txq->used)) {
48841+ sent_skb = sent_skbs[txq->tail];
48842+ end_idx = txq->tail;
48843+ index_adv(&end_idx,
48844+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
48845+ txq->len);
48846+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
48847+ atomic_sub(num_wrbs, &txq->used);
48848+ }
48849 }
48850
48851 static void be_mcc_queues_destroy(struct be_adapter *adapter)
48852@@ -1145,8 +1791,9 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
48853 goto mcc_cq_destroy;
48854
48855 /* Ask BE to create MCC queue */
48856- if (be_cmd_mccq_create(adapter, q, cq))
48857+ if (be_cmd_mccq_create(adapter, q, cq)) {
48858 goto mcc_q_free;
48859+ }
48860
48861 return 0;
48862
48863@@ -1163,16 +1810,20 @@ err:
48864 static void be_tx_queues_destroy(struct be_adapter *adapter)
48865 {
48866 struct be_queue_info *q;
48867+ struct be_tx_obj *txo;
48868+ u8 i;
48869
48870- q = &adapter->tx_obj.q;
48871- if (q->created)
48872- be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48873- be_queue_free(adapter, q);
48874+ for_all_tx_queues(adapter, txo, i) {
48875+ q = &txo->q;
48876+ if (q->created)
48877+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48878+ be_queue_free(adapter, q);
48879
48880- q = &adapter->tx_obj.cq;
48881- if (q->created)
48882- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48883- be_queue_free(adapter, q);
48884+ q = &txo->cq;
48885+ if (q->created)
48886+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48887+ be_queue_free(adapter, q);
48888+ }
48889
48890 /* Clear any residual events */
48891 be_eq_clean(adapter, &adapter->tx_eq);
48892@@ -1183,168 +1834,210 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
48893 be_queue_free(adapter, q);
48894 }
48895
48896+/* One TX event queue is shared by all TX compl qs */
48897 static int be_tx_queues_create(struct be_adapter *adapter)
48898 {
48899 struct be_queue_info *eq, *q, *cq;
48900+ struct be_tx_obj *txo;
48901+ u8 i, tc_id;
48902
48903 adapter->tx_eq.max_eqd = 0;
48904 adapter->tx_eq.min_eqd = 0;
48905 adapter->tx_eq.cur_eqd = 96;
48906 adapter->tx_eq.enable_aic = false;
48907- /* Alloc Tx Event queue */
48908+
48909 eq = &adapter->tx_eq.q;
48910- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
48911+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48912+ sizeof(struct be_eq_entry)))
48913 return -1;
48914
48915- /* Ask BE to create Tx Event queue */
48916 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
48917- goto tx_eq_free;
48918- /* Alloc TX eth compl queue */
48919- cq = &adapter->tx_obj.cq;
48920- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48921+ goto err;
48922+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
48923+
48924+ for_all_tx_queues(adapter, txo, i) {
48925+ cq = &txo->cq;
48926+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48927 sizeof(struct be_eth_tx_compl)))
48928- goto tx_eq_destroy;
48929+ goto err;
48930
48931- /* Ask BE to create Tx eth compl queue */
48932- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48933- goto tx_cq_free;
48934+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48935+ goto err;
48936
48937- /* Alloc TX eth queue */
48938- q = &adapter->tx_obj.q;
48939- if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
48940- goto tx_cq_destroy;
48941+ q = &txo->q;
48942+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
48943+ sizeof(struct be_eth_wrb)))
48944+ goto err;
48945
48946- /* Ask BE to create Tx eth queue */
48947- if (be_cmd_txq_create(adapter, q, cq))
48948- goto tx_q_free;
48949+ if (be_cmd_txq_create(adapter, q, cq, &tc_id))
48950+ goto err;
48951+
48952+ if (adapter->flags & BE_FLAGS_DCBX)
48953+ adapter->tc_txq_map[tc_id] = i;
48954+ }
48955 return 0;
48956
48957-tx_q_free:
48958- be_queue_free(adapter, q);
48959-tx_cq_destroy:
48960- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
48961-tx_cq_free:
48962- be_queue_free(adapter, cq);
48963-tx_eq_destroy:
48964- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
48965-tx_eq_free:
48966- be_queue_free(adapter, eq);
48967+err:
48968+ be_tx_queues_destroy(adapter);
48969 return -1;
48970 }
48971
48972 static void be_rx_queues_destroy(struct be_adapter *adapter)
48973 {
48974 struct be_queue_info *q;
48975+ struct be_rx_obj *rxo;
48976+ int i;
48977
48978- q = &adapter->rx_obj.q;
48979- if (q->created) {
48980- be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
48981- be_rx_q_clean(adapter);
48982- }
48983- be_queue_free(adapter, q);
48984+ for_all_rx_queues(adapter, rxo, i) {
48985+ be_queue_free(adapter, &rxo->q);
48986+
48987+ q = &rxo->cq;
48988+ if (q->created)
48989+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48990+ be_queue_free(adapter, q);
48991
48992- q = &adapter->rx_obj.cq;
48993- if (q->created)
48994- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48995- be_queue_free(adapter, q);
48996+ q = &rxo->rx_eq.q;
48997+ if (q->created)
48998+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
48999+ be_queue_free(adapter, q);
49000
49001- /* Clear any residual events */
49002- be_eq_clean(adapter, &adapter->rx_eq);
49003+ kfree(rxo->page_info_tbl);
49004+ }
49005+}
49006
49007- q = &adapter->rx_eq.q;
49008- if (q->created)
49009- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
49010- be_queue_free(adapter, q);
49011+/* Is BE in a multi-channel mode */
49012+static inline bool be_is_mc(struct be_adapter *adapter) {
49013+ return (adapter->function_mode & FLEX10_MODE ||
49014+ adapter->function_mode & VNIC_MODE ||
49015+ adapter->function_mode & UMC_ENABLED);
49016+}
49017+
49018+static u32 be_num_rxqs_want(struct be_adapter *adapter)
49019+{
49020+ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
49021+ adapter->num_vfs == 0 && be_physfn(adapter) &&
49022+ !be_is_mc(adapter)) {
49023+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
49024+ } else {
49025+ dev_warn(&adapter->pdev->dev,
49026+ "No support for multiple RX queues\n");
49027+ return 1;
49028+ }
49029 }
49030
49031 static int be_rx_queues_create(struct be_adapter *adapter)
49032 {
49033 struct be_queue_info *eq, *q, *cq;
49034- int rc;
49035+ struct be_rx_obj *rxo;
49036+ int rc, i;
49037
49038+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
49039+ msix_enabled(adapter) ?
49040+ adapter->num_msix_vec - 1 : 1);
49041+ if (adapter->num_rx_qs != MAX_RX_QS)
49042+ dev_warn(&adapter->pdev->dev,
49043+ "Could create only %d receive queues",
49044+ adapter->num_rx_qs);
49045+
49046+ adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1;
49047 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
49048- adapter->rx_eq.max_eqd = BE_MAX_EQD;
49049- adapter->rx_eq.min_eqd = 0;
49050- adapter->rx_eq.cur_eqd = 0;
49051- adapter->rx_eq.enable_aic = true;
49052+ for_all_rx_queues(adapter, rxo, i) {
49053+ rxo->adapter = adapter;
49054+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
49055+ rxo->rx_eq.enable_aic = true;
49056
49057- /* Alloc Rx Event queue */
49058- eq = &adapter->rx_eq.q;
49059- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49060- sizeof(struct be_eq_entry));
49061- if (rc)
49062- return rc;
49063+ /* EQ */
49064+ eq = &rxo->rx_eq.q;
49065+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49066+ sizeof(struct be_eq_entry));
49067+ if (rc)
49068+ goto err;
49069
49070- /* Ask BE to create Rx Event queue */
49071- rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
49072- if (rc)
49073- goto rx_eq_free;
49074+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
49075+ if (rc)
49076+ goto err;
49077
49078- /* Alloc RX eth compl queue */
49079- cq = &adapter->rx_obj.cq;
49080- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49081- sizeof(struct be_eth_rx_compl));
49082- if (rc)
49083- goto rx_eq_destroy;
49084+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
49085
49086- /* Ask BE to create Rx eth compl queue */
49087- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49088- if (rc)
49089- goto rx_cq_free;
49090+ /* CQ */
49091+ cq = &rxo->cq;
49092+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49093+ sizeof(struct be_eth_rx_compl));
49094+ if (rc)
49095+ goto err;
49096
49097- /* Alloc RX eth queue */
49098- q = &adapter->rx_obj.q;
49099- rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
49100- if (rc)
49101- goto rx_cq_destroy;
49102+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49103+ if (rc)
49104+ goto err;
49105
49106- /* Ask BE to create Rx eth queue */
49107- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
49108- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
49109- if (rc)
49110- goto rx_q_free;
49111+ /* Rx Q - will be created in be_open() */
49112+ q = &rxo->q;
49113+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
49114+ sizeof(struct be_eth_rx_d));
49115+ if (rc)
49116+ goto err;
49117+
49118+ rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) *
49119+ RX_Q_LEN, GFP_KERNEL);
49120+ if (!rxo->page_info_tbl)
49121+ goto err;
49122+ }
49123
49124 return 0;
49125-rx_q_free:
49126- be_queue_free(adapter, q);
49127-rx_cq_destroy:
49128- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
49129-rx_cq_free:
49130- be_queue_free(adapter, cq);
49131-rx_eq_destroy:
49132- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
49133-rx_eq_free:
49134- be_queue_free(adapter, eq);
49135- return rc;
49136+err:
49137+ be_rx_queues_destroy(adapter);
49138+ return -1;
49139 }
49140
49141-/* There are 8 evt ids per func. Retruns the evt id's bit number */
49142-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
49143+static bool event_peek(struct be_eq_obj *eq_obj)
49144 {
49145- return eq_id - 8 * be_pci_func(adapter);
49146+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
49147+ if (!eqe->evt)
49148+ return false;
49149+ else
49150+ return true;
49151 }
49152
49153 static irqreturn_t be_intx(int irq, void *dev)
49154 {
49155 struct be_adapter *adapter = dev;
49156- int isr;
49157+ struct be_rx_obj *rxo;
49158+ int isr, i, tx = 0 , rx = 0;
49159
49160- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49161- be_pci_func(adapter) * CEV_ISR_SIZE);
49162- if (!isr)
49163- return IRQ_NONE;
49164+ if (lancer_chip(adapter)) {
49165+ if (event_peek(&adapter->tx_eq))
49166+ tx = event_handle(adapter, &adapter->tx_eq, false);
49167+ for_all_rx_queues(adapter, rxo, i) {
49168+ if (event_peek(&rxo->rx_eq))
49169+ rx |= event_handle(adapter, &rxo->rx_eq, true);
49170+ }
49171
49172- event_handle(adapter, &adapter->tx_eq);
49173- event_handle(adapter, &adapter->rx_eq);
49174+ if (!(tx || rx))
49175+ return IRQ_NONE;
49176+ } else {
49177+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49178+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
49179+ if (!isr)
49180+ return IRQ_NONE;
49181+
49182+ if ((1 << adapter->tx_eq.eq_idx & isr))
49183+ event_handle(adapter, &adapter->tx_eq, false);
49184+
49185+ for_all_rx_queues(adapter, rxo, i) {
49186+ if ((1 << rxo->rx_eq.eq_idx & isr))
49187+ event_handle(adapter, &rxo->rx_eq, true);
49188+ }
49189+ }
49190
49191 return IRQ_HANDLED;
49192 }
49193
49194 static irqreturn_t be_msix_rx(int irq, void *dev)
49195 {
49196- struct be_adapter *adapter = dev;
49197+ struct be_rx_obj *rxo = dev;
49198+ struct be_adapter *adapter = rxo->adapter;
49199
49200- event_handle(adapter, &adapter->rx_eq);
49201+ event_handle(adapter, &rxo->rx_eq, true);
49202
49203 return IRQ_HANDLED;
49204 }
49205@@ -1353,48 +2046,72 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
49206 {
49207 struct be_adapter *adapter = dev;
49208
49209- event_handle(adapter, &adapter->tx_eq);
49210+ event_handle(adapter, &adapter->tx_eq, false);
49211
49212 return IRQ_HANDLED;
49213 }
49214
49215 static inline bool do_gro(struct be_adapter *adapter,
49216- struct be_eth_rx_compl *rxcp)
49217+ struct be_rx_compl_info *rxcp)
49218 {
49219- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
49220- int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
49221-
49222- if (err)
49223- drvr_stats(adapter)->be_rxcp_err++;
49224-
49225- return (tcp_frame && !err) ? true : false;
49226+ return (!rxcp->tcpf || rxcp->err || adapter->max_rx_coal <= 1 ||
49227+ (rxcp->vlanf && !vlan_configured(adapter))) ?
49228+ false : true;
49229 }
49230
49231 int be_poll_rx(struct napi_struct *napi, int budget)
49232 {
49233 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
49234- struct be_adapter *adapter =
49235- container_of(rx_eq, struct be_adapter, rx_eq);
49236- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
49237- struct be_eth_rx_compl *rxcp;
49238+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
49239+ struct be_adapter *adapter = rxo->adapter;
49240+ struct be_queue_info *rx_cq = &rxo->cq;
49241+ struct be_rx_compl_info *rxcp;
49242 u32 work_done;
49243+ bool flush_lro = false;
49244
49245+ rxo->stats.rx_polls++;
49246 for (work_done = 0; work_done < budget; work_done++) {
49247- rxcp = be_rx_compl_get(adapter);
49248+ rxcp = be_rx_compl_get(rxo);
49249 if (!rxcp)
49250 break;
49251
49252- if (do_gro(adapter, rxcp))
49253- be_rx_compl_process_gro(adapter, rxcp);
49254- else
49255- be_rx_compl_process(adapter, rxcp);
49256+ /* Is it a flush compl that has no data */
49257+ if (unlikely(rxcp->num_rcvd == 0))
49258+ continue;
49259
49260- be_rx_compl_reset(rxcp);
49261+ if (unlikely(rxcp->port != adapter->port_num)) {
49262+ be_rx_compl_discard(adapter, rxo, rxcp);
49263+ be_rx_stats_update(rxo, rxcp);
49264+ continue;
49265+ }
49266+
49267+ if (likely((lancer_A0_chip(adapter) && !rxcp->err) ||
49268+ !lancer_A0_chip(adapter))) {
49269+ if (do_gro(adapter, rxcp)) {
49270+ if (adapter->gro_supported) {
49271+ be_rx_compl_process_gro(adapter, rxo,
49272+ rxcp);
49273+ } else {
49274+ be_rx_compl_process_lro(adapter, rxo,
49275+ rxcp);
49276+ flush_lro = true;
49277+ }
49278+ } else {
49279+ be_rx_compl_process(adapter, rxo, rxcp);
49280+ }
49281+ } else if (lancer_A0_chip(adapter) && rxcp->err) {
49282+ be_rx_compl_discard(adapter, rxo, rxcp);
49283+ }
49284+
49285+ be_rx_stats_update(rxo, rxcp);
49286 }
49287
49288+ if (flush_lro)
49289+ lro_flush_all(&rxo->lro_mgr);
49290+
49291 /* Refill the queue */
49292- if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
49293- be_post_rx_frags(adapter);
49294+ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
49295+ be_post_rx_frags(rxo);
49296
49297 /* All consumed */
49298 if (work_done < budget) {
49299@@ -1404,40 +2121,13 @@ int be_poll_rx(struct napi_struct *napi, int budget)
49300 /* More to be consumed; continue with interrupts disabled */
49301 be_cq_notify(adapter, rx_cq->id, false, work_done);
49302 }
49303+
49304+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
49305+ adapter->netdev->last_rx = jiffies;
49306+#endif
49307 return work_done;
49308 }
49309
49310-void be_process_tx(struct be_adapter *adapter)
49311-{
49312- struct be_queue_info *txq = &adapter->tx_obj.q;
49313- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
49314- struct be_eth_tx_compl *txcp;
49315- u32 num_cmpl = 0;
49316- u16 end_idx;
49317-
49318- while ((txcp = be_tx_compl_get(tx_cq))) {
49319- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
49320- wrb_index, txcp);
49321- be_tx_compl_process(adapter, end_idx);
49322- num_cmpl++;
49323- }
49324-
49325- if (num_cmpl) {
49326- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
49327-
49328- /* As Tx wrbs have been freed up, wake up netdev queue if
49329- * it was stopped due to lack of tx wrbs.
49330- */
49331- if (netif_queue_stopped(adapter->netdev) &&
49332- atomic_read(&txq->used) < txq->len / 2) {
49333- netif_wake_queue(adapter->netdev);
49334- }
49335-
49336- drvr_stats(adapter)->be_tx_events++;
49337- drvr_stats(adapter)->be_tx_compl += num_cmpl;
49338- }
49339-}
49340-
49341 /* As TX and MCC share the same EQ check for both TX and MCC completions.
49342 * For TX/MCC we don't honour budget; consume everything
49343 */
49344@@ -1446,96 +2136,264 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
49345 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
49346 struct be_adapter *adapter =
49347 container_of(tx_eq, struct be_adapter, tx_eq);
49348+ struct be_tx_obj *txo;
49349+ struct be_eth_tx_compl *txcp;
49350+ int tx_compl, mcc_compl, status = 0;
49351+ u8 i;
49352+ u16 num_wrbs;
49353+
49354+ for_all_tx_queues(adapter, txo, i) {
49355+ tx_compl = 0;
49356+ num_wrbs = 0;
49357+ while ((txcp = be_tx_compl_get(&txo->cq))) {
49358+ num_wrbs += be_tx_compl_process(adapter, txo,
49359+ AMAP_GET_BITS(struct amap_eth_tx_compl,
49360+ wrb_index, txcp));
49361+ tx_compl++;
49362+ }
49363+ if (tx_compl) {
49364+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
49365+
49366+ atomic_sub(num_wrbs, &txo->q.used);
49367+
49368+ /* As Tx wrbs have been freed up, wake up netdev queue
49369+ * if it was stopped due to lack of tx wrbs. */
49370+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
49371+ atomic_read(&txo->q.used) < txo->q.len / 2) {
49372+ netif_wake_subqueue(adapter->netdev, i);
49373+ }
49374+
49375+ adapter->drv_stats.be_tx_events++;
49376+ txo->stats.be_tx_compl += tx_compl;
49377+ }
49378+ }
49379+
49380+ mcc_compl = be_process_mcc(adapter, &status);
49381+
49382+ if (mcc_compl) {
49383+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49384+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
49385+ }
49386
49387 napi_complete(napi);
49388
49389- be_process_tx(adapter);
49390-
49391- be_process_mcc(adapter);
49392-
49393+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49394 return 1;
49395 }
49396
49397+void be_detect_dump_ue(struct be_adapter *adapter)
49398+{
49399+ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
49400+ u32 i;
49401+
49402+ pci_read_config_dword(adapter->pdev,
49403+ PCICFG_UE_STATUS_LOW, &ue_status_lo);
49404+ pci_read_config_dword(adapter->pdev,
49405+ PCICFG_UE_STATUS_HIGH, &ue_status_hi);
49406+ pci_read_config_dword(adapter->pdev,
49407+ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
49408+ pci_read_config_dword(adapter->pdev,
49409+ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
49410+
49411+ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
49412+ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
49413+
49414+ if (ue_status_lo || ue_status_hi) {
49415+ adapter->ue_detected = true;
49416+ adapter->eeh_err = true;
49417+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
49418+ }
49419+
49420+ if (ue_status_lo) {
49421+ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
49422+ if (ue_status_lo & 1)
49423+ dev_err(&adapter->pdev->dev,
49424+ "UE: %s bit set\n", ue_status_low_desc[i]);
49425+ }
49426+ }
49427+ if (ue_status_hi) {
49428+ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
49429+ if (ue_status_hi & 1)
49430+ dev_err(&adapter->pdev->dev,
49431+ "UE: %s bit set\n", ue_status_hi_desc[i]);
49432+ }
49433+ }
49434+
49435+}
49436+
49437 static void be_worker(struct work_struct *work)
49438 {
49439 struct be_adapter *adapter =
49440 container_of(work, struct be_adapter, work.work);
49441+ struct be_rx_obj *rxo;
49442+ struct be_tx_obj *txo;
49443+ int i;
49444
49445- be_cmd_get_stats(adapter, &adapter->stats.cmd);
49446+ if (!adapter->ue_detected && !lancer_chip(adapter))
49447+ be_detect_dump_ue(adapter);
49448
49449- /* Set EQ delay */
49450- be_rx_eqd_update(adapter);
49451+ /* when interrupts are not yet enabled, just reap any pending
49452+ * mcc completions */
49453+ if (!netif_running(adapter->netdev)) {
49454+ int mcc_compl, status = 0;
49455
49456- be_tx_rate_update(adapter);
49457- be_rx_rate_update(adapter);
49458+ mcc_compl = be_process_mcc(adapter, &status);
49459
49460- if (adapter->rx_post_starved) {
49461- adapter->rx_post_starved = false;
49462- be_post_rx_frags(adapter);
49463+ if (mcc_compl) {
49464+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49465+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
49466+ }
49467+
49468+ goto reschedule;
49469+ }
49470+
49471+ if (!adapter->stats_cmd_sent)
49472+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
49473+
49474+ for_all_tx_queues(adapter, txo, i)
49475+ be_tx_rate_update(txo);
49476+
49477+ for_all_rx_queues(adapter, rxo, i) {
49478+ be_rx_rate_update(rxo);
49479+ be_rx_eqd_update(adapter, rxo);
49480+
49481+ if (rxo->rx_post_starved) {
49482+ rxo->rx_post_starved = false;
49483+ be_post_rx_frags(rxo);
49484+ }
49485 }
49486
49487+reschedule:
49488+ adapter->work_counter++;
49489 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
49490 }
49491
49492+static void be_msix_disable(struct be_adapter *adapter)
49493+{
49494+ if (msix_enabled(adapter)) {
49495+ pci_disable_msix(adapter->pdev);
49496+ adapter->num_msix_vec = 0;
49497+ }
49498+}
49499+
49500 static void be_msix_enable(struct be_adapter *adapter)
49501 {
49502- int i, status;
49503+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
49504+ int i, status, num_vec;
49505
49506- for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
49507+ num_vec = be_num_rxqs_want(adapter) + 1;
49508+
49509+ for (i = 0; i < num_vec; i++)
49510 adapter->msix_entries[i].entry = i;
49511
49512- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
49513- BE_NUM_MSIX_VECTORS);
49514- if (status == 0)
49515- adapter->msix_enabled = true;
49516+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
49517+ if (status == 0) {
49518+ goto done;
49519+ } else if (status >= BE_MIN_MSIX_VECTORS) {
49520+ num_vec = status;
49521+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
49522+ num_vec) == 0)
49523+ goto done;
49524+ }
49525 return;
49526+done:
49527+ adapter->num_msix_vec = num_vec;
49528+ return;
49529+}
49530+
49531+static void be_sriov_enable(struct be_adapter *adapter)
49532+{
49533+ be_check_sriov_fn_type(adapter);
49534+#ifdef CONFIG_PCI_IOV
49535+ if (be_physfn(adapter) && num_vfs) {
49536+ int status, pos;
49537+ u16 nvfs;
49538+
49539+ pos = pci_find_ext_capability(adapter->pdev,
49540+ PCI_EXT_CAP_ID_SRIOV);
49541+ pci_read_config_word(adapter->pdev,
49542+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
49543+ adapter->num_vfs = num_vfs;
49544+ if (num_vfs > nvfs) {
49545+ dev_info(&adapter->pdev->dev,
49546+ "Device supports %d VFs and not %d\n",
49547+ nvfs, num_vfs);
49548+ adapter->num_vfs = nvfs;
49549+ }
49550+
49551+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
49552+ if (status)
49553+ adapter->num_vfs = 0;
49554+ }
49555+#endif
49556+}
49557+
49558+static void be_sriov_disable(struct be_adapter *adapter)
49559+{
49560+#ifdef CONFIG_PCI_IOV
49561+ if (adapter->num_vfs > 0) {
49562+ pci_disable_sriov(adapter->pdev);
49563+ adapter->num_vfs = 0;
49564+ }
49565+#endif
49566 }
49567
49568-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
49569+static inline int be_msix_vec_get(struct be_adapter *adapter,
49570+ struct be_eq_obj *eq_obj)
49571 {
49572- return adapter->msix_entries[
49573- be_evt_bit_get(adapter, eq_id)].vector;
49574+ return adapter->msix_entries[eq_obj->eq_idx].vector;
49575 }
49576
49577 static int be_request_irq(struct be_adapter *adapter,
49578 struct be_eq_obj *eq_obj,
49579- void *handler, char *desc)
49580+ void *handler, char *desc, void *context)
49581 {
49582 struct net_device *netdev = adapter->netdev;
49583 int vec;
49584
49585 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
49586- vec = be_msix_vec_get(adapter, eq_obj->q.id);
49587- return request_irq(vec, handler, 0, eq_obj->desc, adapter);
49588+ vec = be_msix_vec_get(adapter, eq_obj);
49589+ return request_irq(vec, handler, 0, eq_obj->desc, context);
49590 }
49591
49592-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
49593+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
49594+ void *context)
49595 {
49596- int vec = be_msix_vec_get(adapter, eq_obj->q.id);
49597- free_irq(vec, adapter);
49598+ int vec = be_msix_vec_get(adapter, eq_obj);
49599+ free_irq(vec, context);
49600 }
49601
49602 static int be_msix_register(struct be_adapter *adapter)
49603 {
49604- int status;
49605+ struct be_rx_obj *rxo;
49606+ int status, i;
49607+ char qname[10];
49608
49609- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
49610+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
49611+ adapter);
49612 if (status)
49613 goto err;
49614
49615- status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
49616- if (status)
49617- goto free_tx_irq;
49618+ for_all_rx_queues(adapter, rxo, i) {
49619+ sprintf(qname, "rxq%d", i);
49620+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
49621+ qname, rxo);
49622+ if (status)
49623+ goto err_msix;
49624+ }
49625
49626 return 0;
49627
49628-free_tx_irq:
49629- be_free_irq(adapter, &adapter->tx_eq);
49630+err_msix:
49631+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49632+
49633+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
49634+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49635+
49636 err:
49637 dev_warn(&adapter->pdev->dev,
49638 "MSIX Request IRQ failed - err %d\n", status);
49639- pci_disable_msix(adapter->pdev);
49640- adapter->msix_enabled = false;
49641+ be_msix_disable(adapter);
49642 return status;
49643 }
49644
49645@@ -1544,10 +2402,13 @@ static int be_irq_register(struct be_adapter *adapter)
49646 struct net_device *netdev = adapter->netdev;
49647 int status;
49648
49649- if (adapter->msix_enabled) {
49650+ if (msix_enabled(adapter)) {
49651 status = be_msix_register(adapter);
49652 if (status == 0)
49653 goto done;
49654+ /* INTx is not supported for VF */
49655+ if (!be_physfn(adapter))
49656+ return status;
49657 }
49658
49659 /* INTx */
49660@@ -1567,87 +2428,363 @@ done:
49661 static void be_irq_unregister(struct be_adapter *adapter)
49662 {
49663 struct net_device *netdev = adapter->netdev;
49664+ struct be_rx_obj *rxo;
49665+ int i;
49666
49667 if (!adapter->isr_registered)
49668 return;
49669
49670 /* INTx */
49671- if (!adapter->msix_enabled) {
49672+ if (!msix_enabled(adapter)) {
49673 free_irq(netdev->irq, adapter);
49674 goto done;
49675 }
49676
49677 /* MSIx */
49678- be_free_irq(adapter, &adapter->tx_eq);
49679- be_free_irq(adapter, &adapter->rx_eq);
49680+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49681+
49682+ for_all_rx_queues(adapter, rxo, i)
49683+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49684+
49685 done:
49686 adapter->isr_registered = false;
49687- return;
49688 }
49689
49690-static int be_open(struct net_device *netdev)
49691+static u16 be_select_queue(struct net_device *netdev,
49692+ struct sk_buff *skb)
49693 {
49694 struct be_adapter *adapter = netdev_priv(netdev);
49695- struct be_eq_obj *rx_eq = &adapter->rx_eq;
49696+ u8 prio;
49697+
49698+ if (adapter->num_tx_qs == 1)
49699+ return 0;
49700+
49701+ prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
49702+ return adapter->tc_txq_map[adapter->prio_tc_map[prio]];
49703+}
49704+
49705+static void be_rx_queues_clear(struct be_adapter *adapter)
49706+{
49707+ struct be_queue_info *q;
49708+ struct be_rx_obj *rxo;
49709+ int i;
49710+
49711+ for_all_rx_queues(adapter, rxo, i) {
49712+ q = &rxo->q;
49713+ if (q->created) {
49714+ be_cmd_rxq_destroy(adapter, q);
49715+ /* After the rxq is invalidated, wait for a grace time
49716+ * of 1ms for all dma to end and the flush compl to
49717+ * arrive
49718+ */
49719+ mdelay(1);
49720+ be_rx_q_clean(adapter, rxo);
49721+ }
49722+
49723+ /* Clear any residual events */
49724+ q = &rxo->rx_eq.q;
49725+ if (q->created)
49726+ be_eq_clean(adapter, &rxo->rx_eq);
49727+ }
49728+}
49729+
49730+static int be_close(struct net_device *netdev)
49731+{
49732+ struct be_adapter *adapter = netdev_priv(netdev);
49733+ struct be_rx_obj *rxo;
49734+ struct be_tx_obj *txo;
49735 struct be_eq_obj *tx_eq = &adapter->tx_eq;
49736- bool link_up;
49737- int status;
49738+ int vec, i;
49739+
49740+ be_async_mcc_disable(adapter);
49741+
49742+ netif_stop_queue(netdev);
49743+ netif_carrier_off(netdev);
49744+ adapter->link_status = LINK_DOWN;
49745+
49746+ if (!lancer_chip(adapter))
49747+ be_intr_set(adapter, false);
49748+
49749+ for_all_rx_queues(adapter, rxo, i)
49750+ napi_disable(&rxo->rx_eq.napi);
49751+
49752+ napi_disable(&tx_eq->napi);
49753+
49754+ if (lancer_chip(adapter)) {
49755+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
49756+ for_all_rx_queues(adapter, rxo, i)
49757+ be_cq_notify(adapter, rxo->cq.id, false, 0);
49758+ for_all_tx_queues(adapter, txo, i)
49759+ be_cq_notify(adapter, txo->cq.id, false, 0);
49760+ }
49761+
49762+ if (msix_enabled(adapter)) {
49763+ vec = be_msix_vec_get(adapter, tx_eq);
49764+ synchronize_irq(vec);
49765+
49766+ for_all_rx_queues(adapter, rxo, i) {
49767+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
49768+ synchronize_irq(vec);
49769+ }
49770+ } else {
49771+ synchronize_irq(netdev->irq);
49772+ }
49773+ be_irq_unregister(adapter);
49774+
49775+ /* Wait for all pending tx completions to arrive so that
49776+ * all tx skbs are freed.
49777+ */
49778+ for_all_tx_queues(adapter, txo, i)
49779+ be_tx_compl_clean(adapter, txo);
49780+
49781+ be_rx_queues_clear(adapter);
49782+ return 0;
49783+}
49784+
49785+static int be_rx_queues_setup(struct be_adapter *adapter)
49786+{
49787+ struct be_rx_obj *rxo;
49788+ int rc, i;
49789+ u8 rsstable[MAX_RSS_QS];
49790+
49791+ for_all_rx_queues(adapter, rxo, i) {
49792+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
49793+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
49794+ adapter->if_handle,
49795+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
49796+ if (rc)
49797+ return rc;
49798+ }
49799+
49800+ if (be_multi_rxq(adapter)) {
49801+ for_all_rss_queues(adapter, rxo, i)
49802+ rsstable[i] = rxo->rss_id;
49803+
49804+ rc = be_cmd_rss_config(adapter, rsstable,
49805+ adapter->num_rx_qs - 1);
49806+ if (rc)
49807+ return rc;
49808+ }
49809
49810 /* First time posting */
49811- be_post_rx_frags(adapter);
49812+ for_all_rx_queues(adapter, rxo, i) {
49813+ be_post_rx_frags(rxo);
49814+ napi_enable(&rxo->rx_eq.napi);
49815+ }
49816+ return 0;
49817+}
49818+
49819+static int be_open(struct net_device *netdev)
49820+{
49821+ struct be_adapter *adapter = netdev_priv(netdev);
49822+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
49823+ struct be_rx_obj *rxo;
49824+ int link_status;
49825+ int status, i;
49826+ u8 mac_speed;
49827+ u16 link_speed;
49828+
49829+ status = be_rx_queues_setup(adapter);
49830+ if (status)
49831+ goto err;
49832
49833- napi_enable(&rx_eq->napi);
49834 napi_enable(&tx_eq->napi);
49835
49836 be_irq_register(adapter);
49837
49838- be_intr_set(adapter, true);
49839+ if (!lancer_chip(adapter))
49840+ be_intr_set(adapter, true);
49841
49842 /* The evt queues are created in unarmed state; arm them */
49843- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
49844+ for_all_rx_queues(adapter, rxo, i) {
49845+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
49846+ be_cq_notify(adapter, rxo->cq.id, true, 0);
49847+ }
49848 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49849
49850- /* Rx compl queue may be in unarmed state; rearm it */
49851- be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
49852+ /* Now that interrupts are on we can process async mcc */
49853+ be_async_mcc_enable(adapter);
49854
49855- status = be_cmd_link_status_query(adapter, &link_up);
49856+ status = be_cmd_link_status_query(adapter, &link_status, &mac_speed,
49857+ &link_speed, 0);
49858 if (status)
49859- goto ret_sts;
49860- be_link_status_update(adapter, link_up);
49861+ goto err;
49862+ be_link_status_update(adapter, link_status);
49863
49864- status = be_vid_config(adapter);
49865+ status = be_vid_config(adapter, false, 0);
49866 if (status)
49867- goto ret_sts;
49868+ goto err;
49869
49870- status = be_cmd_set_flow_control(adapter,
49871- adapter->tx_fc, adapter->rx_fc);
49872- if (status)
49873- goto ret_sts;
49874+ if (be_physfn(adapter)) {
49875+ status = be_cmd_set_flow_control(adapter,
49876+ adapter->tx_fc, adapter->rx_fc);
49877+ if (status)
49878+ goto err;
49879+ }
49880+
49881+ return 0;
49882+err:
49883+ be_close(adapter->netdev);
49884+ return -EIO;
49885+}
49886+
49887+static int be_setup_wol(struct be_adapter *adapter, bool enable)
49888+{
49889+ struct be_dma_mem cmd;
49890+ int status = 0;
49891+ u8 mac[ETH_ALEN];
49892+
49893+ memset(mac, 0, ETH_ALEN);
49894+
49895+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
49896+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
49897+ if (cmd.va == NULL)
49898+ return -1;
49899+ memset(cmd.va, 0, cmd.size);
49900+
49901+ if (enable) {
49902+ status = pci_write_config_dword(adapter->pdev,
49903+ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
49904+ if (status) {
49905+ dev_err(&adapter->pdev->dev,
49906+ "Could not enable Wake-on-lan\n");
49907+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
49908+ cmd.dma);
49909+ return status;
49910+ }
49911+ status = be_cmd_enable_magic_wol(adapter,
49912+ adapter->netdev->dev_addr, &cmd);
49913+ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
49914+ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
49915+ } else {
49916+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
49917+ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
49918+ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
49919+ }
49920+
49921+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
49922+ return status;
49923+}
49924+
49925+/*
49926+ * Generate a seed MAC address from the PF MAC Address using jhash.
49927+ * MAC Address for VFs are assigned incrementally starting from the seed.
49928+ * These addresses are programmed in the ASIC by the PF and the VF driver
49929+ * queries for the MAC address during its probe.
49930+ */
49931+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
49932+{
49933+ u32 vf = 0;
49934+ int status = 0;
49935+ u8 mac[ETH_ALEN];
49936+
49937+ be_vf_eth_addr_generate(adapter, mac);
49938+
49939+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49940+ status = be_cmd_pmac_add(adapter, mac,
49941+ adapter->vf_cfg[vf].vf_if_handle,
49942+ &adapter->vf_cfg[vf].vf_pmac_id,
49943+ vf + 1);
49944+ if (status)
49945+ dev_err(&adapter->pdev->dev,
49946+ "Mac address add failed for VF %d\n", vf);
49947+ else
49948+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
49949
49950- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
49951-ret_sts:
49952+ mac[5] += 1;
49953+ }
49954 return status;
49955 }
49956
49957+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
49958+{
49959+ u32 vf;
49960+
49961+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49962+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
49963+ be_cmd_pmac_del(adapter,
49964+ adapter->vf_cfg[vf].vf_if_handle,
49965+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
49966+ }
49967+}
49968+
49969+static int be_num_txqs_want(struct be_adapter *adapter)
49970+{
49971+ if (adapter->num_vfs > 0 || be_is_mc(adapter) ||
49972+ lancer_chip(adapter) || !be_physfn(adapter) ||
49973+ adapter->generation == BE_GEN2)
49974+ return 1;
49975+ else
49976+ return MAX_TX_QS;
49977+}
49978+
49979 static int be_setup(struct be_adapter *adapter)
49980 {
49981 struct net_device *netdev = adapter->netdev;
49982- u32 cap_flags, en_flags;
49983- int status;
49984-
49985- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49986- BE_IF_FLAGS_MCAST_PROMISCUOUS |
49987- BE_IF_FLAGS_PROMISCUOUS |
49988- BE_IF_FLAGS_PASS_L3L4_ERRORS;
49989- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49990- BE_IF_FLAGS_PASS_L3L4_ERRORS;
49991+ int status, fw_num_txqs, num_txqs;
49992+ u32 cap_flags, en_flags, vf = 0;
49993+ u8 mac[ETH_ALEN];
49994+
49995+ num_txqs = be_num_txqs_want(adapter);
49996+ if (num_txqs > 1) {
49997+ be_cmd_req_pg_pfc(adapter, &fw_num_txqs);
49998+ num_txqs = min(num_txqs, fw_num_txqs);
49999+ }
50000+ adapter->num_tx_qs = num_txqs;
50001+ if (adapter->num_tx_qs != MAX_TX_QS)
50002+ netif_set_real_num_tx_queues(adapter->netdev,
50003+ adapter->num_tx_qs);
50004+
50005+ be_cmd_req_native_mode(adapter);
50006+
50007+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
50008+ BE_IF_FLAGS_BROADCAST |
50009+ BE_IF_FLAGS_MULTICAST;
50010+
50011+ if (be_physfn(adapter)) {
50012+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
50013+ cap_flags |= BE_IF_FLAGS_RSS;
50014+ en_flags |= BE_IF_FLAGS_RSS;
50015+ }
50016+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
50017+ BE_IF_FLAGS_PROMISCUOUS;
50018+ if (!lancer_A0_chip(adapter)) {
50019+ cap_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
50020+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
50021+ }
50022+ }
50023
50024 status = be_cmd_if_create(adapter, cap_flags, en_flags,
50025 netdev->dev_addr, false/* pmac_invalid */,
50026- &adapter->if_handle, &adapter->pmac_id);
50027+ &adapter->if_handle, &adapter->pmac_id, 0);
50028 if (status != 0)
50029 goto do_none;
50030
50031+ if (be_physfn(adapter)) {
50032+ while (vf < adapter->num_vfs) {
50033+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
50034+ BE_IF_FLAGS_BROADCAST;
50035+ status = be_cmd_if_create(adapter, cap_flags,
50036+ en_flags, mac, true,
50037+ &adapter->vf_cfg[vf].vf_if_handle,
50038+ NULL, vf+1);
50039+ if (status) {
50040+ dev_err(&adapter->pdev->dev,
50041+ "Interface Create failed for VF %d\n", vf);
50042+ goto if_destroy;
50043+ }
50044+ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
50045+ vf++;
50046+ }
50047+ } else {
50048+ status = be_cmd_mac_addr_query(adapter, mac,
50049+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
50050+ if (!status) {
50051+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
50052+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
50053+ }
50054+ }
50055+
50056 status = be_tx_queues_create(adapter);
50057 if (status != 0)
50058 goto if_destroy;
50059@@ -1656,10 +2793,15 @@ static int be_setup(struct be_adapter *adapter)
50060 if (status != 0)
50061 goto tx_qs_destroy;
50062
50063+ /* Allow all priorities by default. A GRP5 evt may modify this */
50064+ adapter->vlan_prio_bmap = 0xff;
50065+
50066 status = be_mcc_queues_create(adapter);
50067 if (status != 0)
50068 goto rx_qs_destroy;
50069
50070+ adapter->link_speed = -1;
50071+
50072 return 0;
50073
50074 rx_qs_destroy:
50075@@ -1667,158 +2809,392 @@ rx_qs_destroy:
50076 tx_qs_destroy:
50077 be_tx_queues_destroy(adapter);
50078 if_destroy:
50079- be_cmd_if_destroy(adapter, adapter->if_handle);
50080+ if (be_physfn(adapter)) {
50081+ for (vf = 0; vf < adapter->num_vfs; vf++)
50082+ if (adapter->vf_cfg[vf].vf_if_handle)
50083+ be_cmd_if_destroy(adapter,
50084+ adapter->vf_cfg[vf].vf_if_handle,
50085+ vf + 1);
50086+ }
50087+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50088 do_none:
50089 return status;
50090 }
50091
50092 static int be_clear(struct be_adapter *adapter)
50093 {
50094+ int vf;
50095+
50096+ if (be_physfn(adapter) && adapter->num_vfs)
50097+ be_vf_eth_addr_rem(adapter);
50098+
50099 be_mcc_queues_destroy(adapter);
50100 be_rx_queues_destroy(adapter);
50101 be_tx_queues_destroy(adapter);
50102+ adapter->eq_next_idx = 0;
50103
50104- be_cmd_if_destroy(adapter, adapter->if_handle);
50105+ if (be_physfn(adapter)) {
50106+ for (vf = 0; vf < adapter->num_vfs; vf++)
50107+ if (adapter->vf_cfg[vf].vf_if_handle)
50108+ be_cmd_if_destroy(adapter,
50109+ adapter->vf_cfg[vf].vf_if_handle, vf + 1);
50110+ }
50111+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50112
50113+ /* tell fw we're done with firing cmds */
50114+ be_cmd_fw_clean(adapter);
50115 return 0;
50116 }
50117
50118-static int be_close(struct net_device *netdev)
50119+static void be_cpy_drv_ver(struct be_adapter *adapter, void *va)
50120+{
50121+ struct mgmt_controller_attrib *attrib =
50122+ (struct mgmt_controller_attrib *) ((u8*) va +
50123+ sizeof(struct be_cmd_resp_hdr));
50124+
50125+ memcpy(attrib->hba_attribs.driver_version_string,
50126+ DRV_VER, sizeof(DRV_VER));
50127+ attrib->pci_bus_number = adapter->pdev->bus->number;
50128+ attrib->pci_device_number = PCI_SLOT(adapter->pdev->devfn);
50129+ return;
50130+}
50131+
50132+#define IOCTL_COOKIE "SERVERENGINES CORP"
50133+static int be_do_ioctl(struct net_device *netdev,
50134+ struct ifreq *ifr, int cmd)
50135 {
50136 struct be_adapter *adapter = netdev_priv(netdev);
50137- struct be_eq_obj *rx_eq = &adapter->rx_eq;
50138- struct be_eq_obj *tx_eq = &adapter->tx_eq;
50139- int vec;
50140+ struct be_cmd_req_hdr req;
50141+ struct be_cmd_resp_hdr *resp;
50142+ void *data = ifr->ifr_data;
50143+ void *ioctl_ptr;
50144+ void *va;
50145+ dma_addr_t dma;
50146+ u32 req_size;
50147+ int status, ret = 0;
50148+ u8 cookie[32];
50149+
50150+ switch (cmd) {
50151+ case SIOCDEVPRIVATE:
50152+ if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE)))
50153+ return -EFAULT;
50154+
50155+ if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
50156+ return -EINVAL;
50157
50158- cancel_delayed_work_sync(&adapter->work);
50159+ ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE);
50160+ if (copy_from_user(&req, ioctl_ptr,
50161+ sizeof(struct be_cmd_req_hdr)))
50162+ return -EFAULT;
50163
50164- netif_stop_queue(netdev);
50165- netif_carrier_off(netdev);
50166- adapter->link_up = false;
50167+ req_size = le32_to_cpu(req.request_length);
50168+ if (req_size > 65536)
50169+ return -EINVAL;
50170
50171- be_intr_set(adapter, false);
50172+ req_size += sizeof(struct be_cmd_req_hdr);
50173+ va = pci_alloc_consistent(adapter->pdev, req_size, &dma);
50174+ if (!va)
50175+ return -ENOMEM;
50176+ if (copy_from_user(va, ioctl_ptr, req_size)) {
50177+ ret = -EFAULT;
50178+ break;
50179+ }
50180
50181- if (adapter->msix_enabled) {
50182- vec = be_msix_vec_get(adapter, tx_eq->q.id);
50183- synchronize_irq(vec);
50184- vec = be_msix_vec_get(adapter, rx_eq->q.id);
50185- synchronize_irq(vec);
50186- } else {
50187- synchronize_irq(netdev->irq);
50188+ status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va);
50189+ if (status == -1) {
50190+ ret = -EIO;
50191+ break;
50192+ }
50193+
50194+ resp = (struct be_cmd_resp_hdr *) va;
50195+ if (!status) {
50196+ if (req.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES)
50197+ be_cpy_drv_ver(adapter, va);
50198+ }
50199+
50200+ if (copy_to_user(ioctl_ptr, va, req_size)) {
50201+ ret = -EFAULT;
50202+ break;
50203+ }
50204+ break;
50205+ default:
50206+ return -EOPNOTSUPP;
50207 }
50208- be_irq_unregister(adapter);
50209
50210- napi_disable(&rx_eq->napi);
50211- napi_disable(&tx_eq->napi);
50212+ if (va)
50213+ pci_free_consistent(adapter->pdev, req_size, va, dma);
50214+
50215+ return ret;
50216+}
50217+
50218+#ifdef CONFIG_NET_POLL_CONTROLLER
50219+static void be_netpoll(struct net_device *netdev)
50220+{
50221+ struct be_adapter *adapter = netdev_priv(netdev);
50222+ struct be_rx_obj *rxo;
50223+ int i;
50224
50225- /* Wait for all pending tx completions to arrive so that
50226- * all tx skbs are freed.
50227- */
50228- be_tx_compl_clean(adapter);
50229+ event_handle(adapter, &adapter->tx_eq, false);
50230+ for_all_rx_queues(adapter, rxo, i)
50231+ event_handle(adapter, &rxo->rx_eq, true);
50232+
50233+ return;
50234+}
50235+#endif
50236+
50237+static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
50238+ void **ip_hdr, void **tcpudp_hdr,
50239+ u64 *hdr_flags, void *priv)
50240+{
50241+ struct ethhdr *eh;
50242+ struct vlan_ethhdr *veh;
50243+ struct iphdr *iph;
50244+ u8 *va = page_address(frag->page) + frag->page_offset;
50245+ unsigned long ll_hlen;
50246+
50247+ prefetch(va);
50248+ eh = (struct ethhdr *)va;
50249+ *mac_hdr = eh;
50250+ ll_hlen = ETH_HLEN;
50251+ if (eh->h_proto != htons(ETH_P_IP)) {
50252+ if (eh->h_proto == htons(ETH_P_8021Q)) {
50253+ veh = (struct vlan_ethhdr *)va;
50254+ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
50255+ return -1;
50256+
50257+ ll_hlen += VLAN_HLEN;
50258+ } else {
50259+ return -1;
50260+ }
50261+ }
50262+ *hdr_flags = LRO_IPV4;
50263+ iph = (struct iphdr *)(va + ll_hlen);
50264+ *ip_hdr = iph;
50265+ if (iph->protocol != IPPROTO_TCP)
50266+ return -1;
50267+ *hdr_flags |= LRO_TCP;
50268+ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
50269
50270 return 0;
50271 }
50272
50273-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50274+static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
50275+{
50276+ struct net_lro_mgr *lro_mgr;
50277+ struct be_rx_obj *rxo;
50278+ int i;
50279+
50280+ for_all_rx_queues(adapter, rxo, i) {
50281+ lro_mgr = &rxo->lro_mgr;
50282+ lro_mgr->dev = netdev;
50283+ lro_mgr->features = LRO_F_NAPI;
50284+ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
50285+ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
50286+ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
50287+ lro_mgr->lro_arr = rxo->lro_desc;
50288+ lro_mgr->get_frag_header = be_get_frag_header;
50289+ lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
50290+ }
50291+
50292+#ifdef NETIF_F_GRO
50293+ netdev->features |= NETIF_F_GRO;
50294+ adapter->gro_supported = true;
50295+#endif
50296+}
50297+
50298+#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50299 char flash_cookie[2][16] = {"*** SE FLAS",
50300 "H DIRECTORY *** "};
50301-static int be_flash_image(struct be_adapter *adapter,
50302+
50303+static bool be_flash_redboot(struct be_adapter *adapter,
50304+ const u8 *p, u32 img_start, int image_size,
50305+ int hdr_size)
50306+{
50307+ u32 crc_offset;
50308+ u8 flashed_crc[4];
50309+ int status;
50310+
50311+ crc_offset = hdr_size + img_start + image_size - 4;
50312+
50313+ p += crc_offset;
50314+
50315+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
50316+ (image_size - 4));
50317+ if (status) {
50318+ dev_err(&adapter->pdev->dev,
50319+ "could not get crc from flash, not flashing redboot\n");
50320+ return false;
50321+ }
50322+
50323+ /*update redboot only if crc does not match*/
50324+ if (!memcmp(flashed_crc, p, 4))
50325+ return false;
50326+ else
50327+ return true;
50328+}
50329+
50330+static bool phy_flashing_required(struct be_adapter *adapter)
50331+{
50332+ int status = 0;
50333+ struct be_phy_info phy_info;
50334+
50335+ status = be_cmd_get_phy_info(adapter, &phy_info);
50336+ if (status)
50337+ return false;
50338+ if ((phy_info.phy_type == TN_8022) &&
50339+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
50340+ return true;
50341+ }
50342+ return false;
50343+}
50344+
50345+static int be_flash_data(struct be_adapter *adapter,
50346 const struct firmware *fw,
50347- struct be_dma_mem *flash_cmd, u32 flash_type)
50348+ struct be_dma_mem *flash_cmd, int num_of_images)
50349+
50350 {
50351- int status;
50352- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
50353+ int status = 0, i, filehdr_size = 0;
50354+ u32 total_bytes = 0, flash_op;
50355 int num_bytes;
50356 const u8 *p = fw->data;
50357 struct be_cmd_write_flashrom *req = flash_cmd->va;
50358+ struct flash_comp *pflashcomp;
50359+ int num_comp;
50360
50361- switch (flash_type) {
50362- case FLASHROM_TYPE_ISCSI_ACTIVE:
50363- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
50364- image_size = FLASH_IMAGE_MAX_SIZE;
50365- break;
50366- case FLASHROM_TYPE_ISCSI_BACKUP:
50367- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
50368- image_size = FLASH_IMAGE_MAX_SIZE;
50369- break;
50370- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
50371- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
50372- image_size = FLASH_IMAGE_MAX_SIZE;
50373- break;
50374- case FLASHROM_TYPE_FCOE_FW_BACKUP:
50375- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
50376- image_size = FLASH_IMAGE_MAX_SIZE;
50377- break;
50378- case FLASHROM_TYPE_BIOS:
50379- image_offset = FLASH_iSCSI_BIOS_START;
50380- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50381- break;
50382- case FLASHROM_TYPE_FCOE_BIOS:
50383- image_offset = FLASH_FCoE_BIOS_START;
50384- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50385- break;
50386- case FLASHROM_TYPE_PXE_BIOS:
50387- image_offset = FLASH_PXE_BIOS_START;
50388- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50389- break;
50390- default:
50391- return 0;
50392+ struct flash_comp gen3_flash_types[10] = {
50393+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
50394+ FLASH_IMAGE_MAX_SIZE_g3},
50395+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
50396+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
50397+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
50398+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50399+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
50400+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50401+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
50402+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50403+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
50404+ FLASH_IMAGE_MAX_SIZE_g3},
50405+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
50406+ FLASH_IMAGE_MAX_SIZE_g3},
50407+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
50408+ FLASH_IMAGE_MAX_SIZE_g3},
50409+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
50410+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
50411+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
50412+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
50413+ };
50414+ struct flash_comp gen2_flash_types[8] = {
50415+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
50416+ FLASH_IMAGE_MAX_SIZE_g2},
50417+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
50418+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
50419+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
50420+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50421+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
50422+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50423+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
50424+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50425+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
50426+ FLASH_IMAGE_MAX_SIZE_g2},
50427+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
50428+ FLASH_IMAGE_MAX_SIZE_g2},
50429+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
50430+ FLASH_IMAGE_MAX_SIZE_g2}
50431+ };
50432+ if (adapter->generation == BE_GEN3) {
50433+ pflashcomp = gen3_flash_types;
50434+ filehdr_size = sizeof(struct flash_file_hdr_g3);
50435+ num_comp = ARRAY_SIZE(gen3_flash_types);
50436+ } else {
50437+ pflashcomp = gen2_flash_types;
50438+ filehdr_size = sizeof(struct flash_file_hdr_g2);
50439+ num_comp = ARRAY_SIZE(gen2_flash_types);
50440 }
50441+ for (i = 0; i < num_comp; i++) {
50442+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
50443+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
50444+ continue;
50445+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
50446+ if (!phy_flashing_required(adapter))
50447+ continue;
50448+ }
50449+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
50450+ (!be_flash_redboot(adapter, fw->data,
50451+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
50452+ (num_of_images * sizeof(struct image_hdr)))))
50453+ continue;
50454
50455- p += sizeof(struct flash_file_hdr) + image_offset;
50456- if (p + image_size > fw->data + fw->size)
50457- return -1;
50458-
50459- total_bytes = image_size;
50460-
50461- while (total_bytes) {
50462- if (total_bytes > 32*1024)
50463- num_bytes = 32*1024;
50464- else
50465- num_bytes = total_bytes;
50466- total_bytes -= num_bytes;
50467-
50468- if (!total_bytes)
50469- flash_op = FLASHROM_OPER_FLASH;
50470- else
50471- flash_op = FLASHROM_OPER_SAVE;
50472- memcpy(req->params.data_buf, p, num_bytes);
50473- p += num_bytes;
50474- status = be_cmd_write_flashrom(adapter, flash_cmd,
50475- flash_type, flash_op, num_bytes);
50476- if (status) {
50477- dev_err(&adapter->pdev->dev,
50478- "cmd to write to flash rom failed. type/op %d/%d\n",
50479- flash_type, flash_op);
50480+ p = fw->data;
50481+ p += filehdr_size + pflashcomp[i].offset
50482+ + (num_of_images * sizeof(struct image_hdr));
50483+ if (p + pflashcomp[i].size > fw->data + fw->size)
50484 return -1;
50485+ total_bytes = pflashcomp[i].size;
50486+ while (total_bytes) {
50487+ if (total_bytes > 32*1024)
50488+ num_bytes = 32*1024;
50489+ else
50490+ num_bytes = total_bytes;
50491+ total_bytes -= num_bytes;
50492+ if (!total_bytes) {
50493+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50494+ flash_op = FLASHROM_OPER_PHY_FLASH;
50495+ else
50496+ flash_op = FLASHROM_OPER_FLASH;
50497+ } else {
50498+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50499+ flash_op = FLASHROM_OPER_PHY_SAVE;
50500+ else
50501+ flash_op = FLASHROM_OPER_SAVE;
50502+ }
50503+ memcpy(req->params.data_buf, p, num_bytes);
50504+ p += num_bytes;
50505+ status = be_cmd_write_flashrom(adapter, flash_cmd,
50506+ pflashcomp[i].optype, flash_op, num_bytes);
50507+ if (status) {
50508+ if ((status == ILLEGAL_IOCTL_REQ) &&
50509+ (pflashcomp[i].optype ==
50510+ IMG_TYPE_PHY_FW))
50511+ break;
50512+ dev_err(&adapter->pdev->dev,
50513+ "cmd to write to flash rom failed.\n");
50514+ return -1;
50515+ }
50516+ yield();
50517 }
50518- yield();
50519 }
50520-
50521 return 0;
50522 }
50523
50524+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
50525+{
50526+ if (fhdr == NULL)
50527+ return 0;
50528+ if (fhdr->build[0] == '3')
50529+ return BE_GEN3;
50530+ else if (fhdr->build[0] == '2')
50531+ return BE_GEN2;
50532+ else
50533+ return 0;
50534+}
50535+
50536 int be_load_fw(struct be_adapter *adapter, u8 *func)
50537 {
50538 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
50539 const struct firmware *fw;
50540- struct flash_file_hdr *fhdr;
50541- struct flash_section_info *fsec = NULL;
50542+ struct flash_file_hdr_g2 *fhdr;
50543+ struct flash_file_hdr_g3 *fhdr3;
50544+ struct image_hdr *img_hdr_ptr = NULL;
50545 struct be_dma_mem flash_cmd;
50546- int status;
50547+ int status, i = 0, num_imgs = 0;
50548 const u8 *p;
50549- bool entry_found = false;
50550- int flash_type;
50551- char fw_ver[FW_VER_LEN];
50552- char fw_cfg;
50553
50554- status = be_cmd_get_fw_ver(adapter, fw_ver);
50555- if (status)
50556- return status;
50557+ if (!netif_running(adapter->netdev)) {
50558+ dev_err(&adapter->pdev->dev,
50559+ "Firmware load not allowed (interface is down)\n");
50560+ return -1;
50561+ }
50562
50563- fw_cfg = *(fw_ver + 2);
50564- if (fw_cfg == '0')
50565- fw_cfg = '1';
50566 strcpy(fw_file, func);
50567
50568 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
50569@@ -1826,34 +3202,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50570 goto fw_exit;
50571
50572 p = fw->data;
50573- fhdr = (struct flash_file_hdr *) p;
50574- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
50575- dev_err(&adapter->pdev->dev,
50576- "Firmware(%s) load error (signature did not match)\n",
50577- fw_file);
50578- status = -1;
50579- goto fw_exit;
50580- }
50581-
50582+ fhdr = (struct flash_file_hdr_g2 *) p;
50583 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
50584
50585- p += sizeof(struct flash_file_hdr);
50586- while (p < (fw->data + fw->size)) {
50587- fsec = (struct flash_section_info *)p;
50588- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
50589- entry_found = true;
50590- break;
50591- }
50592- p += 32;
50593- }
50594-
50595- if (!entry_found) {
50596- status = -1;
50597- dev_err(&adapter->pdev->dev,
50598- "Flash cookie not found in firmware image\n");
50599- goto fw_exit;
50600- }
50601-
50602 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
50603 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
50604 &flash_cmd.dma);
50605@@ -1864,12 +3215,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50606 goto fw_exit;
50607 }
50608
50609- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
50610- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
50611- status = be_flash_image(adapter, fw, &flash_cmd,
50612- flash_type);
50613- if (status)
50614- break;
50615+ if ((adapter->generation == BE_GEN3) &&
50616+ (get_ufigen_type(fhdr) == BE_GEN3)) {
50617+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
50618+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
50619+ for (i = 0; i < num_imgs; i++) {
50620+ img_hdr_ptr = (struct image_hdr *) (fw->data +
50621+ (sizeof(struct flash_file_hdr_g3) +
50622+ i * sizeof(struct image_hdr)));
50623+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
50624+ status = be_flash_data(adapter, fw, &flash_cmd,
50625+ num_imgs);
50626+ }
50627+ } else if ((adapter->generation == BE_GEN2) &&
50628+ (get_ufigen_type(fhdr) == BE_GEN2)) {
50629+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
50630+ } else {
50631+ dev_err(&adapter->pdev->dev,
50632+ "UFI and Interface are not compatible for flashing\n");
50633+ status = -1;
50634 }
50635
50636 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
50637@@ -1879,14 +3243,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50638 goto fw_exit;
50639 }
50640
50641- dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n");
50642+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
50643
50644 fw_exit:
50645 release_firmware(fw);
50646 return status;
50647 }
50648
50649-static struct net_device_ops be_netdev_ops = {
50650+static net_device_ops_no_const be_netdev_ops = {
50651 .ndo_open = be_open,
50652 .ndo_stop = be_close,
50653 .ndo_start_xmit = be_xmit,
50654@@ -1898,15 +3262,32 @@ static struct net_device_ops be_netdev_ops = {
50655 .ndo_vlan_rx_register = be_vlan_register,
50656 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
50657 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
50658+#ifdef HAVE_SRIOV_CONFIG
50659+ .ndo_set_vf_mac = be_set_vf_mac,
50660+ .ndo_set_vf_vlan = be_set_vf_vlan,
50661+ .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
50662+ .ndo_get_vf_config = be_get_vf_config,
50663+#endif
50664+ .ndo_do_ioctl = be_do_ioctl,
50665+#ifdef CONFIG_NET_POLL_CONTROLLER
50666+ .ndo_poll_controller = be_netpoll,
50667+#endif
50668 };
50669
50670-static void be_netdev_init(struct net_device *netdev)
50671+static int be_netdev_init(struct net_device *netdev)
50672 {
50673 struct be_adapter *adapter = netdev_priv(netdev);
50674+ struct be_rx_obj *rxo;
50675+ int i, status = 0;
50676
50677 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
50678- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
50679- NETIF_F_GRO;
50680+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_TSO6;
50681+
50682+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
50683+ NETIF_F_HW_CSUM;
50684+
50685+ netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO |
50686+ NETIF_F_VLAN_CSUM;
50687
50688 netdev->flags |= IFF_MULTICAST;
50689
50690@@ -1918,17 +3299,30 @@ static void be_netdev_init(struct net_device *netdev)
50691
50692 netif_set_gso_max_size(netdev, 65535);
50693
50694+ if (adapter->flags & BE_FLAGS_DCBX)
50695+ be_netdev_ops.ndo_select_queue = be_select_queue;
50696 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
50697-
50698+
50699 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
50700
50701- netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
50702- BE_NAPI_WEIGHT);
50703- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50704+ be_lro_init(adapter, netdev);
50705+
50706+ for_all_rx_queues(adapter, rxo, i) {
50707+ status = be_netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
50708+ BE_NAPI_WEIGHT);
50709+ if (status) {
50710+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50711+ "for rxo:%d\n", i);
50712+ return status;
50713+ }
50714+ }
50715+ status = be_netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50716 BE_NAPI_WEIGHT);
50717+ if (status)
50718+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50719+ "for tx\n");
50720
50721- netif_carrier_off(netdev);
50722- netif_stop_queue(netdev);
50723+ return status;
50724 }
50725
50726 static void be_unmap_pci_bars(struct be_adapter *adapter)
50727@@ -1937,37 +3331,62 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
50728 iounmap(adapter->csr);
50729 if (adapter->db)
50730 iounmap(adapter->db);
50731- if (adapter->pcicfg)
50732+ if (adapter->pcicfg && be_physfn(adapter))
50733 iounmap(adapter->pcicfg);
50734 }
50735
50736 static int be_map_pci_bars(struct be_adapter *adapter)
50737 {
50738+ struct pci_dev *pdev = adapter->pdev;
50739 u8 __iomem *addr;
50740- int pcicfg_reg;
50741+ int pcicfg_reg, db_reg;
50742
50743- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
50744- pci_resource_len(adapter->pdev, 2));
50745- if (addr == NULL)
50746- return -ENOMEM;
50747- adapter->csr = addr;
50748+ if (lancer_chip(adapter)) {
50749+ addr = ioremap_nocache(pci_resource_start(pdev, 0),
50750+ pci_resource_len(adapter->pdev, 0));
50751+ if (addr == NULL)
50752+ return -ENOMEM;
50753+ adapter->db = addr;
50754+ return 0;
50755+ }
50756
50757- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
50758- 128 * 1024);
50759- if (addr == NULL)
50760- goto pci_map_err;
50761- adapter->db = addr;
50762+ if (be_physfn(adapter)) {
50763+ addr = ioremap_nocache(pci_resource_start(pdev, 2),
50764+ pci_resource_len(pdev, 2));
50765+ if (addr == NULL)
50766+ return -ENOMEM;
50767+ adapter->csr = addr;
50768+ adapter->netdev->mem_start = pci_resource_start(pdev, 2);
50769+ adapter->netdev->mem_end = pci_resource_start(pdev, 2) +
50770+ pci_resource_len(pdev, 2);
50771+ }
50772
50773- if (adapter->generation == BE_GEN2)
50774+ if (adapter->generation == BE_GEN2) {
50775 pcicfg_reg = 1;
50776- else
50777+ db_reg = 4;
50778+ } else {
50779 pcicfg_reg = 0;
50780+ if (be_physfn(adapter))
50781+ db_reg = 4;
50782+ else
50783+ db_reg = 0;
50784+ }
50785
50786- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
50787- pci_resource_len(adapter->pdev, pcicfg_reg));
50788+ addr = ioremap_nocache(pci_resource_start(pdev, db_reg),
50789+ pci_resource_len(pdev, db_reg));
50790 if (addr == NULL)
50791 goto pci_map_err;
50792- adapter->pcicfg = addr;
50793+ adapter->db = addr;
50794+
50795+ if (be_physfn(adapter)) {
50796+ addr = ioremap_nocache(
50797+ pci_resource_start(pdev, pcicfg_reg),
50798+ pci_resource_len(pdev, pcicfg_reg));
50799+ if (addr == NULL)
50800+ goto pci_map_err;
50801+ adapter->pcicfg = addr;
50802+ } else
50803+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
50804
50805 return 0;
50806 pci_map_err:
50807@@ -1985,40 +3404,69 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
50808 if (mem->va)
50809 pci_free_consistent(adapter->pdev, mem->size,
50810 mem->va, mem->dma);
50811+
50812+ mem = &adapter->rx_filter;
50813+ if (mem->va)
50814+ pci_free_consistent(adapter->pdev, mem->size,
50815+ mem->va, mem->dma);
50816 }
50817
50818 static int be_ctrl_init(struct be_adapter *adapter)
50819 {
50820 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
50821 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
50822+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
50823 int status;
50824
50825 status = be_map_pci_bars(adapter);
50826 if (status)
50827- return status;
50828+ goto done;
50829
50830 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
50831 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
50832 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
50833 if (!mbox_mem_alloc->va) {
50834- be_unmap_pci_bars(adapter);
50835- return -1;
50836+ status = -ENOMEM;
50837+ goto unmap_pci_bars;
50838 }
50839+
50840 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
50841 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
50842 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
50843 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
50844- spin_lock_init(&adapter->mbox_lock);
50845+
50846+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
50847+ rx_filter->va = pci_alloc_consistent(adapter->pdev, rx_filter->size,
50848+ &rx_filter->dma);
50849+ if (rx_filter->va == NULL) {
50850+ status = -ENOMEM;
50851+ goto free_mbox;
50852+ }
50853+ memset(rx_filter->va, 0, rx_filter->size);
50854+
50855+ mutex_init(&adapter->mbox_lock);
50856 spin_lock_init(&adapter->mcc_lock);
50857 spin_lock_init(&adapter->mcc_cq_lock);
50858
50859+ init_completion(&adapter->flash_compl);
50860+
50861+ PCI_SAVE_STATE(adapter->pdev);
50862 return 0;
50863+
50864+free_mbox:
50865+ pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
50866+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
50867+
50868+unmap_pci_bars:
50869+ be_unmap_pci_bars(adapter);
50870+
50871+done:
50872+ return status;
50873 }
50874
50875 static void be_stats_cleanup(struct be_adapter *adapter)
50876 {
50877- struct be_stats_obj *stats = &adapter->stats;
50878- struct be_dma_mem *cmd = &stats->cmd;
50879+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50880
50881 if (cmd->va)
50882 pci_free_consistent(adapter->pdev, cmd->size,
50883@@ -2027,10 +3475,12 @@ static void be_stats_cleanup(struct be_adapter *adapter)
50884
50885 static int be_stats_init(struct be_adapter *adapter)
50886 {
50887- struct be_stats_obj *stats = &adapter->stats;
50888- struct be_dma_mem *cmd = &stats->cmd;
50889+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50890
50891- cmd->size = sizeof(struct be_cmd_req_get_stats);
50892+ if (adapter->generation == BE_GEN2)
50893+ cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
50894+ else
50895+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
50896 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
50897 if (cmd->va == NULL)
50898 return -1;
50899@@ -2041,9 +3491,17 @@ static int be_stats_init(struct be_adapter *adapter)
50900 static void __devexit be_remove(struct pci_dev *pdev)
50901 {
50902 struct be_adapter *adapter = pci_get_drvdata(pdev);
50903+
50904 if (!adapter)
50905 return;
50906
50907+ cancel_delayed_work_sync(&adapter->work);
50908+
50909+#ifdef CONFIG_PALAU
50910+ be_sysfs_remove_group(adapter);
50911+#endif
50912+
50913+ /* be_close() gets called if the device is open by unregister */
50914 unregister_netdev(adapter->netdev);
50915
50916 be_clear(adapter);
50917@@ -2052,36 +3510,203 @@ static void __devexit be_remove(struct pci_dev *pdev)
50918
50919 be_ctrl_cleanup(adapter);
50920
50921- if (adapter->msix_enabled) {
50922- pci_disable_msix(adapter->pdev);
50923- adapter->msix_enabled = false;
50924- }
50925+ kfree(adapter->vf_cfg);
50926+ be_sriov_disable(adapter);
50927+
50928+ be_msix_disable(adapter);
50929
50930 pci_set_drvdata(pdev, NULL);
50931 pci_release_regions(pdev);
50932 pci_disable_device(pdev);
50933-
50934+ be_netif_napi_del(adapter->netdev);
50935 free_netdev(adapter->netdev);
50936 }
50937
50938-static int be_hw_up(struct be_adapter *adapter)
50939+static void be_pcie_slot_check(struct be_adapter *adapter)
50940+{
50941+ u32 curr, max, width, max_wd, speed, max_sp;
50942+
50943+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_STATUS_OFFSET,
50944+ &curr);
50945+ width = (curr >> PCIE_LINK_STATUS_NEG_WIDTH_SHIFT) &
50946+ PCIE_LINK_STATUS_NEG_WIDTH_MASK;
50947+ speed = (curr >> PCIE_LINK_STATUS_SPEED_SHIFT) &
50948+ PCIE_LINK_STATUS_SPEED_MASK;
50949+
50950+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_CAP_OFFSET,
50951+ &max);
50952+ max_wd = (max >> PCIE_LINK_CAP_MAX_WIDTH_SHIFT) &
50953+ PCIE_LINK_CAP_MAX_WIDTH_MASK;
50954+ max_sp = (max >> PCIE_LINK_CAP_MAX_SPEED_SHIFT) &
50955+ PCIE_LINK_CAP_MAX_SPEED_MASK;
50956+
50957+ if (width < max_wd || speed < max_sp)
50958+ dev_warn(&adapter->pdev->dev,
50959+ "Found network device in a Gen%s x%d PCIe slot. It "
50960+ "should be in a Gen2 x%d slot for best performance\n",
50961+ speed < max_sp ? "1" : "2", width, max_wd);
50962+}
50963+
50964+static int be_get_ioctl_version(char *fw_version) {
50965+ char *str[4];
50966+ int i;
50967+ int val[4];
50968+ char *endptr;
50969+
50970+ if(!fw_version)
50971+ return 0;
50972+ for(i=0; i<3; i++) {
50973+ str[i] = strsep(&fw_version, ".");
50974+ val[i] = simple_strtol(str[i], &endptr, 10);
50975+ }
50976+
50977+ if (val[0]>4 || (val[0]>3 && val[2]>143))
50978+ return 1;
50979+ return 0;
50980+}
50981+
50982+static int be_get_port_names(struct be_adapter *adapter)
50983 {
50984 int status;
50985+ int ver;
50986
50987- status = be_cmd_POST(adapter);
50988+ status = be_cmd_get_fw_ver(adapter,
50989+ adapter->fw_ver, NULL);
50990 if (status)
50991 return status;
50992+ ver = be_get_ioctl_version(adapter->fw_ver);
50993+ if (ver && (adapter->generation == BE_GEN3))
50994+ status = be_cmd_query_port_names_v1(adapter,
50995+ adapter->port_name);
50996+ else
50997+ status = be_cmd_query_port_names_v0(adapter,
50998+ adapter->port_name);
50999+ return status;
51000+}
51001
51002- status = be_cmd_reset_function(adapter);
51003+static int be_get_config(struct be_adapter *adapter)
51004+{
51005+ int status;
51006+ u8 mac[ETH_ALEN];
51007+
51008+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
51009+ &adapter->function_mode,
51010+ &adapter->function_caps);
51011 if (status)
51012 return status;
51013
51014- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
51015+ status = be_cmd_get_cntl_attributes(adapter);
51016 if (status)
51017 return status;
51018
51019- status = be_cmd_query_fw_cfg(adapter,
51020- &adapter->port_num, &adapter->cap);
51021+ memset(mac, 0, ETH_ALEN);
51022+ be_pcie_slot_check(adapter);
51023+
51024+ if (be_physfn(adapter)) {
51025+ status = be_cmd_mac_addr_query(adapter, mac,
51026+ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
51027+
51028+ if (status)
51029+ return status;
51030+
51031+ if (!is_valid_ether_addr(mac))
51032+ return -EADDRNOTAVAIL;
51033+
51034+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
51035+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
51036+ }
51037+
51038+ if (adapter->function_mode & FLEX10_MODE)
51039+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
51040+ else
51041+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
51042+
51043+ return 0;
51044+}
51045+
51046+static int be_dev_family_check(struct be_adapter *adapter)
51047+{
51048+ struct pci_dev *pdev = adapter->pdev;
51049+ u32 sli_intf = 0, if_type;
51050+
51051+ switch (pdev->device) {
51052+ case BE_DEVICE_ID1:
51053+ case OC_DEVICE_ID1:
51054+ adapter->generation = BE_GEN2;
51055+ break;
51056+ case BE_DEVICE_ID2:
51057+ case OC_DEVICE_ID2:
51058+ adapter->generation = BE_GEN3;
51059+ break;
51060+ case OC_DEVICE_ID3:
51061+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
51062+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
51063+ SLI_INTF_IF_TYPE_SHIFT;
51064+
51065+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
51066+ if_type != 0x02) {
51067+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
51068+ return -EINVAL;
51069+ }
51070+ if (num_vfs > 0) {
51071+ dev_err(&pdev->dev, "VFs not supported\n");
51072+ return -EINVAL;
51073+ }
51074+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
51075+ SLI_INTF_FAMILY_SHIFT);
51076+ adapter->generation = BE_GEN3;
51077+ break;
51078+ default:
51079+ adapter->generation = 0;
51080+ }
51081+ return 0;
51082+}
51083+
51084+static int lancer_wait_ready(struct be_adapter *adapter)
51085+{
51086+#define SLIPORT_READY_TIMEOUT 500
51087+ u32 sliport_status;
51088+ int status = 0, i;
51089+
51090+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
51091+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51092+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
51093+ break;
51094+
51095+ msleep(20);
51096+ }
51097+
51098+ if (i == SLIPORT_READY_TIMEOUT)
51099+ status = -1;
51100+
51101+ return status;
51102+}
51103+
51104+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
51105+{
51106+ int status;
51107+ u32 sliport_status, err, reset_needed;
51108+ status = lancer_wait_ready(adapter);
51109+ if (!status) {
51110+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51111+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
51112+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
51113+ if (err && reset_needed) {
51114+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
51115+ adapter->db + SLIPORT_CONTROL_OFFSET);
51116+
51117+ /* check adapter has corrected the error */
51118+ status = lancer_wait_ready(adapter);
51119+ sliport_status = ioread32(adapter->db +
51120+ SLIPORT_STATUS_OFFSET);
51121+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
51122+ SLIPORT_STATUS_RN_MASK);
51123+ if (status || sliport_status)
51124+ status = -1;
51125+ } else if (err || reset_needed) {
51126+ status = -1;
51127+ }
51128+ }
51129 return status;
51130 }
51131
51132@@ -2091,7 +3716,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
51133 int status = 0;
51134 struct be_adapter *adapter;
51135 struct net_device *netdev;
51136- u8 mac[ETH_ALEN];
51137+ u32 en;
51138
51139 status = pci_enable_device(pdev);
51140 if (status)
51141@@ -2102,31 +3727,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
51142 goto disable_dev;
51143 pci_set_master(pdev);
51144
51145- netdev = alloc_etherdev(sizeof(struct be_adapter));
51146+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
51147 if (netdev == NULL) {
51148 status = -ENOMEM;
51149 goto rel_reg;
51150 }
51151 adapter = netdev_priv(netdev);
51152
51153- switch (pdev->device) {
51154- case BE_DEVICE_ID1:
51155- case OC_DEVICE_ID1:
51156- adapter->generation = BE_GEN2;
51157- break;
51158- case BE_DEVICE_ID2:
51159- case OC_DEVICE_ID2:
51160- adapter->generation = BE_GEN3;
51161- break;
51162- default:
51163- adapter->generation = 0;
51164- }
51165-
51166 adapter->pdev = pdev;
51167+
51168+ status = be_dev_family_check(adapter);
51169+ if (status)
51170+ goto free_netdev;
51171+
51172 pci_set_drvdata(pdev, adapter);
51173 adapter->netdev = netdev;
51174-
51175- be_msix_enable(adapter);
51176+ SET_NETDEV_DEV(netdev, &pdev->dev);
51177
51178 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
51179 if (!status) {
51180@@ -2139,46 +3755,150 @@ static int __devinit be_probe(struct pci_dev *pdev,
51181 }
51182 }
51183
51184+ be_sriov_enable(adapter);
51185+ if (adapter->num_vfs > 0) {
51186+ adapter->vf_cfg = kcalloc(adapter->num_vfs,
51187+ sizeof(struct be_vf_cfg), GFP_KERNEL);
51188+
51189+ if (!adapter->vf_cfg)
51190+ goto free_netdev;
51191+ }
51192+
51193 status = be_ctrl_init(adapter);
51194 if (status)
51195- goto free_netdev;
51196+ goto free_vf_cfg;
51197+
51198+ if (lancer_chip(adapter)) {
51199+ status = lancer_test_and_set_rdy_state(adapter);
51200+ if (status) {
51201+ dev_err(&pdev->dev, "Adapter in non recoverable error\n");
51202+ goto ctrl_clean;
51203+ }
51204+ }
51205+
51206+ /* sync up with fw's ready state */
51207+ if (be_physfn(adapter)) {
51208+ status = be_cmd_POST(adapter);
51209+ if (status)
51210+ goto ctrl_clean;
51211+ }
51212+
51213+ /* tell fw we're ready to fire cmds */
51214+ status = be_cmd_fw_init(adapter);
51215+ if (status)
51216+ goto ctrl_clean;
51217+
51218+ status = be_cmd_reset_function(adapter);
51219+ if (status)
51220+ goto ctrl_clean;
51221
51222 status = be_stats_init(adapter);
51223 if (status)
51224 goto ctrl_clean;
51225
51226- status = be_hw_up(adapter);
51227+ status = be_get_config(adapter);
51228 if (status)
51229 goto stats_clean;
51230
51231- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
51232- true /* permanent */, 0);
51233- if (status)
51234- goto stats_clean;
51235- memcpy(netdev->dev_addr, mac, ETH_ALEN);
51236+ /* This bit is zero in normal boot case, but in crash kernel case this
51237+ is not cleared. clear this bit here, until we are ready with the irqs
51238+ i.e in be_open call.*/
51239+ if (!lancer_chip(adapter))
51240+ be_intr_set(adapter, false);
51241+
51242+ if (msix)
51243+ be_msix_enable(adapter);
51244
51245 INIT_DELAYED_WORK(&adapter->work, be_worker);
51246- be_netdev_init(netdev);
51247- SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
51248
51249 status = be_setup(adapter);
51250 if (status)
51251- goto stats_clean;
51252+ goto msix_disable;
51253+
51254+ /* Initilize the link status to -1 */
51255+ adapter->link_status = -1;
51256+
51257+ status = be_netdev_init(netdev);
51258+ if (status)
51259+ goto unsetup;
51260+
51261 status = register_netdev(netdev);
51262 if (status != 0)
51263 goto unsetup;
51264
51265- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
51266+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
51267+
51268+ if (be_physfn(adapter) && adapter->num_vfs) {
51269+ u8 mac_speed;
51270+ int link_status;
51271+ u16 def_vlan, vf, lnk_speed;
51272+
51273+ status = be_vf_eth_addr_config(adapter);
51274+ if (status)
51275+ goto unreg_netdev;
51276+
51277+ for (vf = 0; vf < adapter->num_vfs; vf++) {
51278+ status = be_cmd_get_hsw_config(adapter, &def_vlan,
51279+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
51280+ if (!status)
51281+ adapter->vf_cfg[vf].vf_def_vid = def_vlan;
51282+ else
51283+ goto unreg_netdev;
51284+
51285+ status = be_cmd_link_status_query(adapter, &link_status,
51286+ &mac_speed, &lnk_speed, vf + 1);
51287+ if (!status)
51288+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
51289+ else
51290+ goto unreg_netdev;
51291+ }
51292+ }
51293+ if (be_physfn(adapter)) {
51294+ /* Temp fix ofr bug# 23034. Till ARM
51295+ * f/w fixes privilege lvl */
51296+ be_get_port_names(adapter);
51297+ }
51298+
51299+ /* Enable Vlan capability based on privileges.
51300+ * PF will have Vlan capability anyway. */
51301+ be_cmd_get_fn_privileges(adapter, &en, 0);
51302+
51303+ if ((en & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG)) ||
51304+ be_physfn(adapter))
51305+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
51306+ else
51307+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
51308+
51309+ dev_info(&pdev->dev, "%s: numa node %d\n", netdev->name,
51310+ dev_to_node(&pdev->dev));
51311+ dev_info(&pdev->dev, "%s %s \"%s\" port %d\n", nic_name(pdev),
51312+ (adapter->port_num > 1 ? "1Gbps NIC" : "10Gbps NIC"),
51313+ adapter->model_number, adapter->hba_port_num);
51314+
51315+
51316+#ifdef CONFIG_PALAU
51317+ be_sysfs_create_group(adapter);
51318+#endif
51319+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51320 return 0;
51321
51322+unreg_netdev:
51323+ unregister_netdev(netdev);
51324 unsetup:
51325 be_clear(adapter);
51326+msix_disable:
51327+ be_msix_disable(adapter);
51328 stats_clean:
51329 be_stats_cleanup(adapter);
51330 ctrl_clean:
51331 be_ctrl_cleanup(adapter);
51332+free_vf_cfg:
51333+ kfree(adapter->vf_cfg);
51334 free_netdev:
51335- free_netdev(adapter->netdev);
51336+ be_sriov_disable(adapter);
51337+ be_netif_napi_del(netdev);
51338+ free_netdev(netdev);
51339+ pci_set_drvdata(pdev, NULL);
51340 rel_reg:
51341 pci_release_regions(pdev);
51342 disable_dev:
51343@@ -2193,6 +3913,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51344 struct be_adapter *adapter = pci_get_drvdata(pdev);
51345 struct net_device *netdev = adapter->netdev;
51346
51347+ cancel_delayed_work_sync(&adapter->work);
51348+ if (adapter->wol)
51349+ be_setup_wol(adapter, true);
51350+
51351 netif_device_detach(netdev);
51352 if (netif_running(netdev)) {
51353 rtnl_lock();
51354@@ -2202,6 +3926,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51355 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
51356 be_clear(adapter);
51357
51358+ be_msix_disable(adapter);
51359 pci_save_state(pdev);
51360 pci_disable_device(pdev);
51361 pci_set_power_state(pdev, pci_choose_state(pdev, state));
51362@@ -2223,6 +3948,12 @@ static int be_resume(struct pci_dev *pdev)
51363 pci_set_power_state(pdev, 0);
51364 pci_restore_state(pdev);
51365
51366+ be_msix_enable(adapter);
51367+ /* tell fw we're ready to fire cmds */
51368+ status = be_cmd_fw_init(adapter);
51369+ if (status)
51370+ return status;
51371+
51372 be_setup(adapter);
51373 if (netif_running(netdev)) {
51374 rtnl_lock();
51375@@ -2230,28 +3961,152 @@ static int be_resume(struct pci_dev *pdev)
51376 rtnl_unlock();
51377 }
51378 netif_device_attach(netdev);
51379+
51380+ if (adapter->wol)
51381+ be_setup_wol(adapter, false);
51382+
51383+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51384 return 0;
51385 }
51386
51387+/*
51388+ * An FLR will stop BE from DMAing any data.
51389+ */
51390+static void be_shutdown(struct pci_dev *pdev)
51391+{
51392+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51393+
51394+ if (!adapter)
51395+ return;
51396+
51397+ cancel_delayed_work_sync(&adapter->work);
51398+
51399+ netif_device_detach(adapter->netdev);
51400+
51401+ if (adapter->wol)
51402+ be_setup_wol(adapter, true);
51403+
51404+ be_cmd_reset_function(adapter);
51405+
51406+ pci_disable_device(pdev);
51407+}
51408+
51409+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
51410+ pci_channel_state_t state)
51411+{
51412+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51413+ struct net_device *netdev = adapter->netdev;
51414+
51415+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
51416+
51417+ adapter->eeh_err = true;
51418+
51419+ netif_device_detach(netdev);
51420+
51421+ if (netif_running(netdev)) {
51422+ rtnl_lock();
51423+ be_close(netdev);
51424+ rtnl_unlock();
51425+ }
51426+ be_clear(adapter);
51427+
51428+ if (state == pci_channel_io_perm_failure)
51429+ return PCI_ERS_RESULT_DISCONNECT;
51430+
51431+ pci_disable_device(pdev);
51432+
51433+ return PCI_ERS_RESULT_NEED_RESET;
51434+}
51435+
51436+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
51437+{
51438+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51439+ int status;
51440+
51441+ dev_info(&adapter->pdev->dev, "EEH reset\n");
51442+ adapter->eeh_err = false;
51443+
51444+ status = pci_enable_device(pdev);
51445+ if (status)
51446+ return PCI_ERS_RESULT_DISCONNECT;
51447+
51448+ pci_set_master(pdev);
51449+ pci_set_power_state(pdev, 0);
51450+ pci_restore_state(pdev);
51451+
51452+ /* Check if card is ok and fw is ready */
51453+ status = be_cmd_POST(adapter);
51454+ if (status)
51455+ return PCI_ERS_RESULT_DISCONNECT;
51456+
51457+ return PCI_ERS_RESULT_RECOVERED;
51458+}
51459+
51460+static void be_eeh_resume(struct pci_dev *pdev)
51461+{
51462+ int status = 0;
51463+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51464+ struct net_device *netdev = adapter->netdev;
51465+
51466+ dev_info(&adapter->pdev->dev, "EEH resume\n");
51467+
51468+ pci_save_state(pdev);
51469+
51470+ /* tell fw we're ready to fire cmds */
51471+ status = be_cmd_fw_init(adapter);
51472+ if (status)
51473+ goto err;
51474+
51475+ status = be_setup(adapter);
51476+ if (status)
51477+ goto err;
51478+
51479+ if (netif_running(netdev)) {
51480+ status = be_open(netdev);
51481+ if (status)
51482+ goto err;
51483+ }
51484+ netif_device_attach(netdev);
51485+ return;
51486+err:
51487+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
51488+ return;
51489+}
51490+
51491+static struct pci_error_handlers be_eeh_handlers = {
51492+ .error_detected = be_eeh_err_detected,
51493+ .slot_reset = be_eeh_reset,
51494+ .resume = be_eeh_resume,
51495+};
51496+
51497 static struct pci_driver be_driver = {
51498 .name = DRV_NAME,
51499 .id_table = be_dev_ids,
51500 .probe = be_probe,
51501 .remove = be_remove,
51502 .suspend = be_suspend,
51503- .resume = be_resume
51504+ .resume = be_resume,
51505+ .shutdown = be_shutdown,
51506+ .err_handler = &be_eeh_handlers
51507 };
51508
51509 static int __init be_init_module(void)
51510 {
51511- if (rx_frag_size != 8192 && rx_frag_size != 4096
51512- && rx_frag_size != 2048) {
51513+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
51514+ rx_frag_size != 2048) {
51515 printk(KERN_WARNING DRV_NAME
51516 " : Module param rx_frag_size must be 2048/4096/8192."
51517 " Using 2048\n");
51518 rx_frag_size = 2048;
51519 }
51520
51521+ if (!msix && num_vfs > 0) {
51522+ printk(KERN_WARNING DRV_NAME
51523+ " : MSIx required for num_vfs > 0. Ignoring msix=0\n");
51524+ msix = 1;
51525+ }
51526+
51527+
51528 return pci_register_driver(&be_driver);
51529 }
51530 module_init(be_init_module);
51531diff --git a/drivers/net/benet/be_misc.c b/drivers/net/benet/be_misc.c
51532new file mode 100644
51533index 0000000..4ab499f
51534--- /dev/null
51535+++ b/drivers/net/benet/be_misc.c
51536@@ -0,0 +1,106 @@
51537+/*
51538+ * Copyright (C) 2005 - 2011 Emulex
51539+ * All rights reserved.
51540+ *
51541+ * This program is free software; you can redistribute it and/or
51542+ * modify it under the terms of the GNU General Public License version 2
51543+ * as published by the Free Software Foundation. The full GNU General
51544+ * Public License is included in this distribution in the file called COPYING.
51545+ *
51546+ * Contact Information:
51547+ * linux-drivers@emulex.com
51548+ *
51549+ * Emulex
51550+ * 3333 Susan Street
51551+ * Costa Mesa, CA 92626
51552+ */
51553+#include "be.h"
51554+
51555+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51556+static ssize_t
51557+flash_fw_store(struct class_device *cd, const char *buf, size_t len)
51558+{
51559+ struct be_adapter *adapter =
51560+ netdev_priv(container_of(cd, struct net_device, class_dev));
51561+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51562+ int status;
51563+
51564+ if (!capable(CAP_NET_ADMIN))
51565+ return -EPERM;
51566+
51567+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51568+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51569+
51570+ /* Removing new-line char given by sysfs */
51571+ file_name[strlen(file_name) - 1] = '\0';
51572+
51573+ status = be_load_fw(adapter, file_name);
51574+ if (!status)
51575+ return len;
51576+ else
51577+ return status;
51578+}
51579+
51580+static CLASS_DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51581+
51582+static struct attribute *benet_attrs[] = {
51583+ &class_device_attr_flash_fw.attr,
51584+ NULL,
51585+};
51586+#else
51587+
51588+static ssize_t
51589+flash_fw_store(struct device *dev, struct device_attribute *attr,
51590+ const char *buf, size_t len)
51591+{
51592+ struct be_adapter *adapter =
51593+ netdev_priv(container_of(dev, struct net_device, dev));
51594+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51595+ int status;
51596+
51597+ if (!capable(CAP_NET_ADMIN))
51598+ return -EPERM;
51599+
51600+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51601+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51602+
51603+ /* Removing new-line char given by sysfs */
51604+ file_name[strlen(file_name) - 1] = '\0';
51605+
51606+ status = be_load_fw(adapter, file_name);
51607+ if (!status)
51608+ return len;
51609+ else
51610+ return status;
51611+}
51612+
51613+static DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51614+
51615+static struct attribute *benet_attrs[] = {
51616+ &dev_attr_flash_fw.attr,
51617+ NULL,
51618+};
51619+#endif
51620+
51621+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51622+#define CLASS_DEV class_dev
51623+#else
51624+#define CLASS_DEV dev
51625+#endif
51626+
51627+static struct attribute_group benet_attr_group = {.attrs = benet_attrs };
51628+
51629+void be_sysfs_create_group(struct be_adapter *adapter)
51630+{
51631+ int status;
51632+
51633+ status = sysfs_create_group(&adapter->netdev->CLASS_DEV.kobj,
51634+ &benet_attr_group);
51635+ if (status)
51636+ dev_err(&adapter->pdev->dev, "Could not create sysfs group\n");
51637+}
51638+
51639+void be_sysfs_remove_group(struct be_adapter *adapter)
51640+{
51641+ sysfs_remove_group(&adapter->netdev->CLASS_DEV.kobj, &benet_attr_group);
51642+}
51643diff --git a/drivers/net/benet/be_proc.c b/drivers/net/benet/be_proc.c
51644new file mode 100644
51645index 0000000..0bfdb3b
51646--- /dev/null
51647+++ b/drivers/net/benet/be_proc.c
51648@@ -0,0 +1,513 @@
51649+/*
51650+ * Copyright (C) 2005 - 2011 ServerEngines
51651+ * All rights reserved.
51652+ *
51653+ * This program is free software; you can redistribute it and/or
51654+ * modify it under the terms of the GNU General Public License version 2
51655+ * as published by the Free Software Foundation. The full GNU General
51656+ * Public License is included in this distribution in the file called COPYING.
51657+ *
51658+ * Contact Information:
51659+ * linux-drivers@serverengines.com
51660+ *
51661+ * ServerEngines
51662+ * 209 N. Fair Oaks Ave
51663+ * Sunnyvale, CA 94085
51664+ */
51665+#include <linux/proc_fs.h>
51666+#include "be.h"
51667+
51668+char *be_adpt_name[] = {
51669+ "driver/be2net0",
51670+ "driver/be2net1",
51671+ "driver/be2net2",
51672+ "driver/be2net3",
51673+ "driver/be2net4",
51674+ "driver/be2net5",
51675+ "driver/be2net6",
51676+ "driver/be2net7"
51677+};
51678+
51679+#define MAX_BE_DEVICES 8
51680+struct proc_dir_entry *be_proc_dir[MAX_BE_DEVICES];
51681+
51682+/*File to read Eth Ring Information */
51683+#define BE_ETH_RING_FILE "eth_ring"
51684+#define BE_DRVR_STAT_FILE "drvr_stat"
51685+
51686+/*
51687+ * this file enables user to read a 32 bit CSR register.
51688+ * to read 32 bit value of a register at offset 0x1234,
51689+ * first write the offset 0x1234 (echo "0x1234") in
51690+ * the file and then read the value from this file.
51691+ * the written offset is latched until another value is written
51692+ */
51693+#define BE_CSR_R_FILE "csrr"
51694+/*
51695+ * this file enables user to write to a 32 bit CSR register.
51696+ * to write a value 0xdeadbeef to a register at offset 0x1234,
51697+ * write 0x1234 0xdeadbeef (echo "0x1234 0xdeadbeeb") to
51698+ * the file.
51699+ */
51700+#define BE_CSR_W_FILE "csrw"
51701+
51702+#define BE_PROC_MODE 0600
51703+
51704+static char read_eth_ring_buf[4096];
51705+static int read_eth_ring_count;
51706+
51707+/*
51708+ * Get Various Eth Ring Properties
51709+ */
51710+static int proc_eth_read_ring(char *page, char **start,
51711+ off_t off, int count, int *eof, void *data)
51712+{
51713+ int i, n;
51714+ char *p = read_eth_ring_buf;
51715+ struct be_adapter *adapter = (struct be_adapter *) data;
51716+
51717+ if (off == 0) {
51718+ /* Reset read_eth_ring_count */
51719+ read_eth_ring_count = 0;
51720+
51721+ n = sprintf(p, " PhyAddr VirtAddr Size TotalEntries ProducerIndex ConsumerIndex NumUsed\n");
51722+ p += n;
51723+ read_eth_ring_count += n;
51724+
51725+ n = sprintf(p, " ------- -------- ---- ------------ ------------- ------------- -------\n");
51726+ p += n;
51727+ read_eth_ring_count += n;
51728+
51729+ n = sprintf(p, "%s", "EthSendRing");
51730+ p += n;
51731+ read_eth_ring_count += n;
51732+
51733+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13u %7u \n",
51734+ (long) adapter->tx_obj.q.dma_mem.dma,
51735+ (void *)adapter->tx_obj.q.dma_mem.va,
51736+ (u32) (adapter->tx_obj.q.len *
51737+ sizeof(struct be_eth_wrb)),
51738+ adapter->tx_obj.q.len, adapter->tx_obj.q.head,
51739+ adapter->tx_obj.q.tail,
51740+ atomic_read(&adapter->tx_obj.q.used));
51741+
51742+ p += n;
51743+ read_eth_ring_count += n;
51744+
51745+ /* Get Eth Send Compl Queue Details */
51746+ n = sprintf(p, "%s", "EthSendCmplRing");
51747+ p += n;
51748+ read_eth_ring_count += n;
51749+
51750+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51751+ (long)adapter->tx_obj.cq.dma_mem.dma,
51752+ (void *)adapter->tx_obj.cq.dma_mem.va,
51753+ (u32) (adapter->tx_obj.cq.len *
51754+ sizeof(struct be_eth_tx_compl)),
51755+ adapter->tx_obj.cq.len, "NA",
51756+ adapter->tx_obj.cq.tail, "NA");
51757+
51758+ p += n;
51759+ read_eth_ring_count += n;
51760+ /* Get Eth Rx Queue Details */
51761+ n = sprintf(p, "%s", "EthRxRing");
51762+ p += n;
51763+ read_eth_ring_count += n;
51764+
51765+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13s %7u \n",
51766+ (long)adapter->rx_obj.q.dma_mem.dma,
51767+ (void *)adapter->rx_obj.q.dma_mem.va,
51768+ (u32) (adapter->rx_obj.q.len *
51769+ sizeof(struct be_eth_rx_d)),
51770+ adapter->rx_obj.q.len, adapter->rx_obj.q.head,"NA",
51771+ atomic_read(&adapter->rx_obj.q.used));
51772+ p += n;
51773+ read_eth_ring_count += n;
51774+
51775+ /* Get Eth Unicast Rx Compl Queue Details */
51776+ n = sprintf(p, "%s", "EthRxCmplRing");
51777+ p += n;
51778+ read_eth_ring_count += n;
51779+
51780+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51781+ (long)adapter->rx_obj.cq.dma_mem.dma,
51782+ (void *)adapter->rx_obj.cq.dma_mem.va,
51783+ (u32) (adapter->rx_obj.cq.len *
51784+ sizeof(struct be_eth_rx_compl)),
51785+ adapter->rx_obj.cq.len, "NA",
51786+ adapter->rx_obj.cq.tail, "NA");
51787+ p += n;
51788+ read_eth_ring_count += n;
51789+
51790+ /* Get Eth Event Queue Details */
51791+ n = sprintf(p, "%s", "EthTxEventRing");
51792+ p += n;
51793+ read_eth_ring_count += n;
51794+
51795+ n = sprintf(p,
51796+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51797+ (long) adapter->tx_eq.q.dma_mem.dma,
51798+ (void *)adapter->tx_eq.q.dma_mem.va,
51799+ (u32) (adapter->tx_eq.q.len *
51800+ sizeof(struct be_eq_entry)),
51801+ adapter->tx_eq.q.len, "NA",
51802+ adapter->tx_eq.q.tail, "NA");
51803+
51804+ p += n;
51805+ read_eth_ring_count += n;
51806+
51807+ /* Get Eth Event Queue Details */
51808+ n = sprintf(p, "%s", "EthRxEventRing");
51809+ p += n;
51810+ read_eth_ring_count += n;
51811+
51812+ n = sprintf(p,
51813+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51814+ (long) adapter->rx_eq.q.dma_mem.dma,
51815+ (void *)adapter->rx_eq.q.dma_mem.va,
51816+ (u32) (adapter->rx_eq.q.len *
51817+ sizeof(struct be_eq_entry)),
51818+ adapter->rx_eq.q.len, "NA",
51819+ adapter->rx_eq.q.tail, "NA");
51820+
51821+ p += n;
51822+ read_eth_ring_count += n;
51823+ }
51824+
51825+ *start = page;
51826+ /* copy whatever we can */
51827+ if (count < (read_eth_ring_count - off)) {
51828+ i = count;
51829+ *eof = 0; /* More bytes left */
51830+ } else {
51831+ i = read_eth_ring_count - off;
51832+ *eof = 1; /* Nothing left. indicate EOF */
51833+ }
51834+
51835+ memcpy(page, read_eth_ring_buf + off, i);
51836+ return (i);
51837+}
51838+
51839+static int proc_eth_write_ring(struct file *file,
51840+ const char *buffer, unsigned long count,
51841+ void *data)
51842+{
51843+ return (count); /* we do not support write */
51844+}
51845+
51846+/*
51847+ * read the driver stats.
51848+ */
51849+static int proc_read_drvr_stat(char *page, char **start,
51850+ off_t off, int count, int *eof, void *data)
51851+{
51852+ int n, lro_cp;
51853+ char *p = page;
51854+ struct be_adapter *adapter = (struct be_adapter *) data;
51855+ struct net_device *netdev = adapter->netdev;
51856+
51857+ if (off == 0) {
51858+ n = sprintf(p, "interface = %s\n", netdev->name);
51859+ p += n;
51860+ n = sprintf(p, "tx_reqs = %d\n",
51861+ drvr_stats(adapter)->be_tx_reqs);
51862+ p += n;
51863+ n = sprintf(p, "tx_stops = %d\n",
51864+ drvr_stats(adapter)->be_tx_stops);
51865+ p += n;
51866+ n = sprintf(p, "fwd_reqs = %d\n",
51867+ drvr_stats(adapter)->be_fwd_reqs);
51868+ p += n;
51869+ n = sprintf(p, "tx_wrbs = %d\n",
51870+ drvr_stats(adapter)->be_tx_wrbs);
51871+ p += n;
51872+ n = sprintf(p, "rx_poll = %d\n", drvr_stats(adapter)->be_rx_polls);
51873+ p += n;
51874+ n = sprintf(p, "tx_events = %d\n",
51875+ drvr_stats(adapter)->be_tx_events);
51876+ p += n;
51877+ n = sprintf(p, "rx_events = %d\n",
51878+ drvr_stats(adapter)->be_rx_events);
51879+ p += n;
51880+ n = sprintf(p, "tx_compl = %d\n",
51881+ drvr_stats(adapter)->be_tx_compl);
51882+ p += n;
51883+ n = sprintf(p, "rx_compl = %d\n",
51884+ drvr_stats(adapter)->be_rx_compl);
51885+ p += n;
51886+ n = sprintf(p, "ethrx_post_fail = %d\n",
51887+ drvr_stats(adapter)->be_ethrx_post_fail);
51888+ p += n;
51889+ n = sprintf(p, "802.3_dropped_frames = %d\n",
51890+ drvr_stats(adapter)->be_802_3_dropped_frames);
51891+ p += n;
51892+ n = sprintf(p, "802.3_malformed_frames = %d\n",
51893+ drvr_stats(adapter)->be_802_3_malformed_frames);
51894+ p += n;
51895+ n = sprintf(p, "eth_tx_rate = %d\n",
51896+ drvr_stats(adapter)->be_tx_rate);
51897+ p += n;
51898+ n = sprintf(p, "eth_rx_rate = %d\n",
51899+ drvr_stats(adapter)->be_rx_rate);
51900+ p += n;
51901+
51902+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_data[0] +
51903+ drvr_stats(adapter)->be_lro_hgram_data[1] +
51904+ drvr_stats(adapter)->be_lro_hgram_data[2] +
51905+ drvr_stats(adapter)->be_lro_hgram_data[3] +
51906+ drvr_stats(adapter)->be_lro_hgram_data[4] +
51907+ drvr_stats(adapter)->be_lro_hgram_data[5] +
51908+ drvr_stats(adapter)->be_lro_hgram_data[6] +
51909+ drvr_stats(adapter)->be_lro_hgram_data[7])/100;
51910+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51911+ n = sprintf(p,
51912+ "LRO data count %% histogram (1, 2-3, 4-5,..,>=16) = "
51913+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51914+ drvr_stats(adapter)->be_lro_hgram_data[0]/lro_cp,
51915+ drvr_stats(adapter)->be_lro_hgram_data[1]/lro_cp,
51916+ drvr_stats(adapter)->be_lro_hgram_data[2]/lro_cp,
51917+ drvr_stats(adapter)->be_lro_hgram_data[3]/lro_cp,
51918+ drvr_stats(adapter)->be_lro_hgram_data[4]/lro_cp,
51919+ drvr_stats(adapter)->be_lro_hgram_data[5]/lro_cp,
51920+ drvr_stats(adapter)->be_lro_hgram_data[6]/lro_cp,
51921+ drvr_stats(adapter)->be_lro_hgram_data[7]/lro_cp);
51922+ p += n;
51923+
51924+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_ack[0] +
51925+ drvr_stats(adapter)->be_lro_hgram_ack[1] +
51926+ drvr_stats(adapter)->be_lro_hgram_ack[2] +
51927+ drvr_stats(adapter)->be_lro_hgram_ack[3] +
51928+ drvr_stats(adapter)->be_lro_hgram_ack[4] +
51929+ drvr_stats(adapter)->be_lro_hgram_ack[5] +
51930+ drvr_stats(adapter)->be_lro_hgram_ack[6] +
51931+ drvr_stats(adapter)->be_lro_hgram_ack[7])/100;
51932+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51933+ n = sprintf(p,
51934+ "LRO ack count %% histogram (1, 2-3, 4-5,..,>=16) = "
51935+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51936+ drvr_stats(adapter)->be_lro_hgram_ack[0]/lro_cp,
51937+ drvr_stats(adapter)->be_lro_hgram_ack[1]/lro_cp,
51938+ drvr_stats(adapter)->be_lro_hgram_ack[2]/lro_cp,
51939+ drvr_stats(adapter)->be_lro_hgram_ack[3]/lro_cp,
51940+ drvr_stats(adapter)->be_lro_hgram_ack[4]/lro_cp,
51941+ drvr_stats(adapter)->be_lro_hgram_ack[5]/lro_cp,
51942+ drvr_stats(adapter)->be_lro_hgram_ack[6]/lro_cp,
51943+ drvr_stats(adapter)->be_lro_hgram_ack[7]/lro_cp);
51944+ p += n;
51945+ n = sprintf(p, "rx_eq_delay = %d \n", adapter->rx_eq.cur_eqd);
51946+ p += n;
51947+ n = sprintf(p, "rx frags per sec=%d \n",
51948+ drvr_stats(adapter)->be_rx_fps);
51949+ p += n;
51950+
51951+ }
51952+ *eof = 1;
51953+ return (p - page);
51954+}
51955+
51956+static int proc_write_drvr_stat(struct file *file,
51957+ const char *buffer, unsigned long count,
51958+ void *data)
51959+{
51960+ struct be_adapter *adapter = (struct be_adapter *) data;
51961+
51962+ memset(&(adapter->stats.drvr_stats), 0,
51963+ sizeof(adapter->stats.drvr_stats));
51964+ return (count); /* we do not support write */
51965+}
51966+
51967+#if 0
51968+/* the following are some of the functions that are needed here
51969+ * until all initializations are done by MPU.
51970+ */
51971+
51972+u32
51973+CsrReadDr(void* BaseAddress, u32 Offset)
51974+{
51975+ u32 *rp;
51976+
51977+ rp = (u32 *) (((u8 *) BaseAddress) + Offset);
51978+ return (*rp);
51979+}
51980+
51981+/*!
51982+
51983+@brief
51984+ This routine writes to a register located within the CSR
51985+ space for a given function object.
51986+
51987+@param
51988+ FuncObj - Pointer to the function object to read from.
51989+
51990+@param
51991+ Offset - The Offset (in bytes) to write to within the function's CSR space.
51992+
51993+@param
51994+ Value - The value to write to the register.
51995+
51996+@return
51997+
51998+@note
51999+ IRQL: any
52000+
52001+*/
52002+void
52003+CsrWriteDr(void* BaseAddress, u32 Offset, u32 Value)
52004+{
52005+ u32 *Register;
52006+
52007+ Register = (u32 *) (((u8 *) BaseAddress) + Offset);
52008+
52009+ //TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value);
52010+ *Register = Value;
52011+}
52012+u32 be_proc_csrr_offset = -1; /* to latch the offset of next CSR Read req. */
52013+
52014+/*
52015+ * read the csr_r file. return the 32 bit register value from
52016+ * CSR space at offset latched in the global location
52017+ * be_proc_csrr_offset
52018+ */
52019+static int proc_read_csr_r(char *page, char **start,
52020+ off_t off, int count, int *eof, void *data)
52021+{
52022+ struct be_adapter * adapter = (struct be_adapter *)data;
52023+ u32 val;
52024+ int n = 0;
52025+ if (be_proc_csrr_offset == -1)
52026+ return -EINVAL;
52027+
52028+ if (off == 0) {
52029+ /* read the CSR at offset be_proc_csrr_offset and return */
52030+ val = CsrReadDr(adapter->csr_va, be_proc_csrr_offset);
52031+ n = sprintf(page, "0x%x\n", val);
52032+ }
52033+ *eof = 1;
52034+ return n;
52035+}
52036+
52037+/*
52038+ * save the written value in be_proc_csrr_offset for next
52039+ * read from the file
52040+ */
52041+static int proc_write_csr_r(struct file *file,
52042+ const char *buffer, unsigned long count, void *data)
52043+{
52044+ char buf[64];
52045+ u32 n;
52046+
52047+ if (count > sizeof(buf) + 1)
52048+ return -EINVAL;
52049+ if (copy_from_user(buf, buffer, count))
52050+ return -EFAULT;
52051+ buf[count] = '\0';
52052+
52053+ n = simple_strtoul(buf, NULL, 16);
52054+ if (n < 0x50000)
52055+ be_proc_csrr_offset = n;
52056+ return (count);
52057+}
52058+
52059+/*
52060+ * return the latched offset for reading the csr_r file.
52061+ */
52062+static int proc_read_csr_w(char *page, char **start,
52063+ off_t off, int count, int *eof, void *data)
52064+{
52065+
52066+ *eof = 1;
52067+ return sprintf(page, "0x%x\n", be_proc_csrr_offset);
52068+}
52069+
52070+/*
52071+ * the incoming string is of the form "<offset> <value>"
52072+ * where the offset is the offset of the register to be written
52073+ * and value is the value to be written.
52074+ */
52075+static int proc_write_csr_w(struct file *file,
52076+ const char *buffer, unsigned long count,
52077+ void *data)
52078+{
52079+ char buf[64];
52080+ char *p;
52081+ u32 n, val;
52082+ struct be_adapter * adapter = (struct be_adapter *)data;
52083+
52084+ if (count > sizeof(buf) + 1)
52085+ return -EINVAL;
52086+ if (copy_from_user(buf, buffer, count))
52087+ return -EFAULT;
52088+ buf[count] = '\0';
52089+
52090+ n = simple_strtoul(buf, &p, 16);
52091+ if (n > 0x50000)
52092+ return -EINVAL;
52093+
52094+ /* now get the actual value to be written */
52095+ while (*p == ' ' || *p == '\t')
52096+ p++;
52097+ val = simple_strtoul(p, NULL, 16);
52098+ CsrWriteDr(adapter->csr_va, n, val);
52099+ return (count);
52100+}
52101+#endif
52102+
52103+void be_init_procfs(struct be_adapter *adapter, int adapt_num)
52104+{
52105+ static struct proc_dir_entry *pde;
52106+
52107+ if (adapt_num > MAX_BE_DEVICES - 1)
52108+ return;
52109+
52110+ /* create directory */
52111+ be_proc_dir[adapt_num] =
52112+ proc_mkdir(be_adpt_name[adapt_num], NULL);
52113+ if (be_proc_dir[adapt_num]) {
52114+ (be_proc_dir[adapt_num])->owner = THIS_MODULE;
52115+ }
52116+
52117+ pde = create_proc_entry(BE_ETH_RING_FILE, BE_PROC_MODE,
52118+ be_proc_dir[adapt_num]);
52119+ if (pde) {
52120+ pde->read_proc = proc_eth_read_ring;
52121+ pde->write_proc = proc_eth_write_ring;
52122+ pde->data = adapter;
52123+ pde->owner = THIS_MODULE;
52124+ }
52125+
52126+ pde = create_proc_entry(BE_DRVR_STAT_FILE, BE_PROC_MODE,
52127+ be_proc_dir[adapt_num]);
52128+ if (pde) {
52129+ pde->read_proc = proc_read_drvr_stat;
52130+ pde->write_proc = proc_write_drvr_stat;
52131+ pde->data = adapter;
52132+ pde->owner = THIS_MODULE;
52133+ }
52134+
52135+#if 0
52136+ if ((pde = create_proc_entry(BE_CSR_R_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52137+ pde->read_proc = proc_read_csr_r;
52138+ pde->write_proc = proc_write_csr_r;
52139+ pde->data = adapter;
52140+ pde->owner = THIS_MODULE;
52141+ }
52142+
52143+ if ((pde = create_proc_entry(BE_CSR_W_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52144+ pde->read_proc = proc_read_csr_w;
52145+ pde->write_proc = proc_write_csr_w;
52146+ pde->data = adapter;
52147+ pde->owner = THIS_MODULE;
52148+ }
52149+#endif
52150+}
52151+
52152+void be_cleanup_procfs(struct be_adapter *adapter, int adapt_num)
52153+{
52154+ if (adapt_num > MAX_BE_DEVICES - 1)
52155+ return;
52156+ remove_proc_entry(BE_ETH_RING_FILE, be_proc_dir[adapt_num]);
52157+ remove_proc_entry(BE_DRVR_STAT_FILE, be_proc_dir[adapt_num]);
52158+ remove_proc_entry(BE_CSR_R_FILE, be_proc_dir[adapt_num]);
52159+ remove_proc_entry(BE_CSR_W_FILE, be_proc_dir[adapt_num]);
52160+ remove_proc_entry(be_adpt_name[adapt_num], NULL);
52161+}
52162diff --git a/drivers/net/benet/version.h b/drivers/net/benet/version.h
52163new file mode 100644
52164index 0000000..c7ed692
52165--- /dev/null
52166+++ b/drivers/net/benet/version.h
52167@@ -0,0 +1,51 @@
52168+#define STR_BE_BRANCH "0" \r
52169+#define STR_BE_BUILD "479" \r
52170+#define STR_BE_DOT "0"\r
52171+#define STR_BE_MINOR "0"\r
52172+#define STR_BE_MAJOR "4"\r
52173+\r
52174+#define BE_BRANCH 0 \r
52175+#define BE_BUILD 479 \r
52176+#define BE_DOT 0\r
52177+#define BE_MINOR 0\r
52178+#define BE_MAJOR 4\r
52179+\r
52180+#define MGMT_BRANCH 0\r
52181+#define MGMT_BUILDNUM 479\r
52182+#define MGMT_MINOR 0\r
52183+#define MGMT_MAJOR 4\r
52184+\r
52185+#define BE_REDBOOT_VERSION "2.0.5.0"\r
52186+\r
52187+//start-auto\r
52188+#define BUILD_MONTH "12"\r
52189+#define BUILD_MONTH_NAME "December"\r
52190+#define BUILD_DAY "6"\r
52191+#define BUILD_YEAR "2011"\r
52192+#define BUILD_24HOUR "21"\r
52193+#define BUILD_12HOUR "9"\r
52194+#define BUILD_AM_PM "PM"\r
52195+#define BUILD_MIN "48"\r
52196+#define BUILD_SEC "05"\r
52197+#define BUILD_MONTH_NUMBER 12\r
52198+#define BUILD_DAY_NUMBER 6\r
52199+#define BUILD_YEAR_NUMBER 2011\r
52200+#define BUILD_24HOUR_NUMBER 21\r
52201+#define BUILD_12HOUR_NUMBER 9\r
52202+#define BUILD_MIN_NUMBER 48\r
52203+#define BUILD_SEC_NUMBER 5\r
52204+#undef MAJOR_BUILD\r
52205+#undef MINOR_BUILD\r
52206+#undef DOT_BUILD\r
52207+#define NUMBERED_BUILD\r
52208+#undef BRANCH_BUILD\r
52209+//end-auto\r
52210+\r
52211+#define ELX_FCOE_XROM_BIOS_VER "7.03a1"\r
52212+#define ELX_FCoE_X86_VER "4.02a1"\r
52213+#define ELX_FCoE_EFI_VER "5.01a1"\r
52214+#define ELX_FCoE_FCODE_VER "4.01a0"\r
52215+#define ELX_PXE_BIOS_VER "3.00a5"\r
52216+#define ELX_UEFI_NIC_VER "2.10A10"\r
52217+#define ELX_UEFI_FCODE_VER "1.10A0"\r
52218+#define ELX_ISCSI_BIOS_VER "1.00A8"\r
52219diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
52220index 4874b2b..67f8526 100644
52221--- a/drivers/net/bnx2.c
52222+++ b/drivers/net/bnx2.c
52223@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
52224 int rc = 0;
52225 u32 magic, csum;
52226
52227+ pax_track_stack();
52228+
52229 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
52230 goto test_nvram_done;
52231
52232diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
52233index fd3eb07..8a6978d 100644
52234--- a/drivers/net/cxgb3/l2t.h
52235+++ b/drivers/net/cxgb3/l2t.h
52236@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
52237 */
52238 struct l2t_skb_cb {
52239 arp_failure_handler_func arp_failure_handler;
52240-};
52241+} __no_const;
52242
52243 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
52244
52245diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
52246index 032cfe0..411af379 100644
52247--- a/drivers/net/cxgb3/t3_hw.c
52248+++ b/drivers/net/cxgb3/t3_hw.c
52249@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
52250 int i, addr, ret;
52251 struct t3_vpd vpd;
52252
52253+ pax_track_stack();
52254+
52255 /*
52256 * Card information is normally at VPD_BASE but some early cards had
52257 * it at 0.
52258diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
52259index d1e0563..b9e129c 100644
52260--- a/drivers/net/e1000e/82571.c
52261+++ b/drivers/net/e1000e/82571.c
52262@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
52263 {
52264 struct e1000_hw *hw = &adapter->hw;
52265 struct e1000_mac_info *mac = &hw->mac;
52266- struct e1000_mac_operations *func = &mac->ops;
52267+ e1000_mac_operations_no_const *func = &mac->ops;
52268 u32 swsm = 0;
52269 u32 swsm2 = 0;
52270 bool force_clear_smbi = false;
52271@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
52272 temp = er32(ICRXDMTC);
52273 }
52274
52275-static struct e1000_mac_operations e82571_mac_ops = {
52276+static const struct e1000_mac_operations e82571_mac_ops = {
52277 /* .check_mng_mode: mac type dependent */
52278 /* .check_for_link: media type dependent */
52279 .id_led_init = e1000e_id_led_init,
52280@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
52281 .setup_led = e1000e_setup_led_generic,
52282 };
52283
52284-static struct e1000_phy_operations e82_phy_ops_igp = {
52285+static const struct e1000_phy_operations e82_phy_ops_igp = {
52286 .acquire_phy = e1000_get_hw_semaphore_82571,
52287 .check_reset_block = e1000e_check_reset_block_generic,
52288 .commit_phy = NULL,
52289@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
52290 .cfg_on_link_up = NULL,
52291 };
52292
52293-static struct e1000_phy_operations e82_phy_ops_m88 = {
52294+static const struct e1000_phy_operations e82_phy_ops_m88 = {
52295 .acquire_phy = e1000_get_hw_semaphore_82571,
52296 .check_reset_block = e1000e_check_reset_block_generic,
52297 .commit_phy = e1000e_phy_sw_reset,
52298@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
52299 .cfg_on_link_up = NULL,
52300 };
52301
52302-static struct e1000_phy_operations e82_phy_ops_bm = {
52303+static const struct e1000_phy_operations e82_phy_ops_bm = {
52304 .acquire_phy = e1000_get_hw_semaphore_82571,
52305 .check_reset_block = e1000e_check_reset_block_generic,
52306 .commit_phy = e1000e_phy_sw_reset,
52307@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
52308 .cfg_on_link_up = NULL,
52309 };
52310
52311-static struct e1000_nvm_operations e82571_nvm_ops = {
52312+static const struct e1000_nvm_operations e82571_nvm_ops = {
52313 .acquire_nvm = e1000_acquire_nvm_82571,
52314 .read_nvm = e1000e_read_nvm_eerd,
52315 .release_nvm = e1000_release_nvm_82571,
52316diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
52317index 47db9bd..fa58ccd 100644
52318--- a/drivers/net/e1000e/e1000.h
52319+++ b/drivers/net/e1000e/e1000.h
52320@@ -375,9 +375,9 @@ struct e1000_info {
52321 u32 pba;
52322 u32 max_hw_frame_size;
52323 s32 (*get_variants)(struct e1000_adapter *);
52324- struct e1000_mac_operations *mac_ops;
52325- struct e1000_phy_operations *phy_ops;
52326- struct e1000_nvm_operations *nvm_ops;
52327+ const struct e1000_mac_operations *mac_ops;
52328+ const struct e1000_phy_operations *phy_ops;
52329+ const struct e1000_nvm_operations *nvm_ops;
52330 };
52331
52332 /* hardware capability, feature, and workaround flags */
52333diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
52334index ae5d736..e9a93a1 100644
52335--- a/drivers/net/e1000e/es2lan.c
52336+++ b/drivers/net/e1000e/es2lan.c
52337@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
52338 {
52339 struct e1000_hw *hw = &adapter->hw;
52340 struct e1000_mac_info *mac = &hw->mac;
52341- struct e1000_mac_operations *func = &mac->ops;
52342+ e1000_mac_operations_no_const *func = &mac->ops;
52343
52344 /* Set media type */
52345 switch (adapter->pdev->device) {
52346@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
52347 temp = er32(ICRXDMTC);
52348 }
52349
52350-static struct e1000_mac_operations es2_mac_ops = {
52351+static const struct e1000_mac_operations es2_mac_ops = {
52352 .id_led_init = e1000e_id_led_init,
52353 .check_mng_mode = e1000e_check_mng_mode_generic,
52354 /* check_for_link dependent on media type */
52355@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
52356 .setup_led = e1000e_setup_led_generic,
52357 };
52358
52359-static struct e1000_phy_operations es2_phy_ops = {
52360+static const struct e1000_phy_operations es2_phy_ops = {
52361 .acquire_phy = e1000_acquire_phy_80003es2lan,
52362 .check_reset_block = e1000e_check_reset_block_generic,
52363 .commit_phy = e1000e_phy_sw_reset,
52364@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
52365 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
52366 };
52367
52368-static struct e1000_nvm_operations es2_nvm_ops = {
52369+static const struct e1000_nvm_operations es2_nvm_ops = {
52370 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
52371 .read_nvm = e1000e_read_nvm_eerd,
52372 .release_nvm = e1000_release_nvm_80003es2lan,
52373diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
52374index 11f3b7c..6381887 100644
52375--- a/drivers/net/e1000e/hw.h
52376+++ b/drivers/net/e1000e/hw.h
52377@@ -753,6 +753,7 @@ struct e1000_mac_operations {
52378 s32 (*setup_physical_interface)(struct e1000_hw *);
52379 s32 (*setup_led)(struct e1000_hw *);
52380 };
52381+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52382
52383 /* Function pointers for the PHY. */
52384 struct e1000_phy_operations {
52385@@ -774,6 +775,7 @@ struct e1000_phy_operations {
52386 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
52387 s32 (*cfg_on_link_up)(struct e1000_hw *);
52388 };
52389+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52390
52391 /* Function pointers for the NVM. */
52392 struct e1000_nvm_operations {
52393@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
52394 s32 (*validate_nvm)(struct e1000_hw *);
52395 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
52396 };
52397+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52398
52399 struct e1000_mac_info {
52400- struct e1000_mac_operations ops;
52401+ e1000_mac_operations_no_const ops;
52402
52403 u8 addr[6];
52404 u8 perm_addr[6];
52405@@ -823,7 +826,7 @@ struct e1000_mac_info {
52406 };
52407
52408 struct e1000_phy_info {
52409- struct e1000_phy_operations ops;
52410+ e1000_phy_operations_no_const ops;
52411
52412 enum e1000_phy_type type;
52413
52414@@ -857,7 +860,7 @@ struct e1000_phy_info {
52415 };
52416
52417 struct e1000_nvm_info {
52418- struct e1000_nvm_operations ops;
52419+ e1000_nvm_operations_no_const ops;
52420
52421 enum e1000_nvm_type type;
52422 enum e1000_nvm_override override;
52423diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
52424index de39f9a..e28d3e0 100644
52425--- a/drivers/net/e1000e/ich8lan.c
52426+++ b/drivers/net/e1000e/ich8lan.c
52427@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
52428 }
52429 }
52430
52431-static struct e1000_mac_operations ich8_mac_ops = {
52432+static const struct e1000_mac_operations ich8_mac_ops = {
52433 .id_led_init = e1000e_id_led_init,
52434 .check_mng_mode = e1000_check_mng_mode_ich8lan,
52435 .check_for_link = e1000_check_for_copper_link_ich8lan,
52436@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
52437 /* id_led_init dependent on mac type */
52438 };
52439
52440-static struct e1000_phy_operations ich8_phy_ops = {
52441+static const struct e1000_phy_operations ich8_phy_ops = {
52442 .acquire_phy = e1000_acquire_swflag_ich8lan,
52443 .check_reset_block = e1000_check_reset_block_ich8lan,
52444 .commit_phy = NULL,
52445@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
52446 .write_phy_reg = e1000e_write_phy_reg_igp,
52447 };
52448
52449-static struct e1000_nvm_operations ich8_nvm_ops = {
52450+static const struct e1000_nvm_operations ich8_nvm_ops = {
52451 .acquire_nvm = e1000_acquire_nvm_ich8lan,
52452 .read_nvm = e1000_read_nvm_ich8lan,
52453 .release_nvm = e1000_release_nvm_ich8lan,
52454diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
52455index 18d5fbb..542d96d 100644
52456--- a/drivers/net/fealnx.c
52457+++ b/drivers/net/fealnx.c
52458@@ -151,7 +151,7 @@ struct chip_info {
52459 int flags;
52460 };
52461
52462-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
52463+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
52464 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52465 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
52466 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52467diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
52468index 0e5b54b..b503f82 100644
52469--- a/drivers/net/hamradio/6pack.c
52470+++ b/drivers/net/hamradio/6pack.c
52471@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
52472 unsigned char buf[512];
52473 int count1;
52474
52475+ pax_track_stack();
52476+
52477 if (!count)
52478 return;
52479
52480diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
52481index 5862282..7cce8cb 100644
52482--- a/drivers/net/ibmveth.c
52483+++ b/drivers/net/ibmveth.c
52484@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
52485 NULL,
52486 };
52487
52488-static struct sysfs_ops veth_pool_ops = {
52489+static const struct sysfs_ops veth_pool_ops = {
52490 .show = veth_pool_show,
52491 .store = veth_pool_store,
52492 };
52493diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
52494index d617f2d..57b5309 100644
52495--- a/drivers/net/igb/e1000_82575.c
52496+++ b/drivers/net/igb/e1000_82575.c
52497@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
52498 wr32(E1000_VT_CTL, vt_ctl);
52499 }
52500
52501-static struct e1000_mac_operations e1000_mac_ops_82575 = {
52502+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
52503 .reset_hw = igb_reset_hw_82575,
52504 .init_hw = igb_init_hw_82575,
52505 .check_for_link = igb_check_for_link_82575,
52506@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
52507 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
52508 };
52509
52510-static struct e1000_phy_operations e1000_phy_ops_82575 = {
52511+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
52512 .acquire = igb_acquire_phy_82575,
52513 .get_cfg_done = igb_get_cfg_done_82575,
52514 .release = igb_release_phy_82575,
52515 };
52516
52517-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52518+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52519 .acquire = igb_acquire_nvm_82575,
52520 .read = igb_read_nvm_eerd,
52521 .release = igb_release_nvm_82575,
52522diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
52523index 72081df..d855cf5 100644
52524--- a/drivers/net/igb/e1000_hw.h
52525+++ b/drivers/net/igb/e1000_hw.h
52526@@ -288,6 +288,7 @@ struct e1000_mac_operations {
52527 s32 (*read_mac_addr)(struct e1000_hw *);
52528 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
52529 };
52530+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52531
52532 struct e1000_phy_operations {
52533 s32 (*acquire)(struct e1000_hw *);
52534@@ -303,6 +304,7 @@ struct e1000_phy_operations {
52535 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
52536 s32 (*write_reg)(struct e1000_hw *, u32, u16);
52537 };
52538+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52539
52540 struct e1000_nvm_operations {
52541 s32 (*acquire)(struct e1000_hw *);
52542@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
52543 void (*release)(struct e1000_hw *);
52544 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
52545 };
52546+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52547
52548 struct e1000_info {
52549 s32 (*get_invariants)(struct e1000_hw *);
52550@@ -321,7 +324,7 @@ struct e1000_info {
52551 extern const struct e1000_info e1000_82575_info;
52552
52553 struct e1000_mac_info {
52554- struct e1000_mac_operations ops;
52555+ e1000_mac_operations_no_const ops;
52556
52557 u8 addr[6];
52558 u8 perm_addr[6];
52559@@ -365,7 +368,7 @@ struct e1000_mac_info {
52560 };
52561
52562 struct e1000_phy_info {
52563- struct e1000_phy_operations ops;
52564+ e1000_phy_operations_no_const ops;
52565
52566 enum e1000_phy_type type;
52567
52568@@ -400,7 +403,7 @@ struct e1000_phy_info {
52569 };
52570
52571 struct e1000_nvm_info {
52572- struct e1000_nvm_operations ops;
52573+ e1000_nvm_operations_no_const ops;
52574
52575 enum e1000_nvm_type type;
52576 enum e1000_nvm_override override;
52577@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
52578 s32 (*check_for_ack)(struct e1000_hw *, u16);
52579 s32 (*check_for_rst)(struct e1000_hw *, u16);
52580 };
52581+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52582
52583 struct e1000_mbx_stats {
52584 u32 msgs_tx;
52585@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
52586 };
52587
52588 struct e1000_mbx_info {
52589- struct e1000_mbx_operations ops;
52590+ e1000_mbx_operations_no_const ops;
52591 struct e1000_mbx_stats stats;
52592 u32 timeout;
52593 u32 usec_delay;
52594diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
52595index 1e8ce37..549c453 100644
52596--- a/drivers/net/igbvf/vf.h
52597+++ b/drivers/net/igbvf/vf.h
52598@@ -187,9 +187,10 @@ struct e1000_mac_operations {
52599 s32 (*read_mac_addr)(struct e1000_hw *);
52600 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
52601 };
52602+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52603
52604 struct e1000_mac_info {
52605- struct e1000_mac_operations ops;
52606+ e1000_mac_operations_no_const ops;
52607 u8 addr[6];
52608 u8 perm_addr[6];
52609
52610@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
52611 s32 (*check_for_ack)(struct e1000_hw *);
52612 s32 (*check_for_rst)(struct e1000_hw *);
52613 };
52614+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52615
52616 struct e1000_mbx_stats {
52617 u32 msgs_tx;
52618@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
52619 };
52620
52621 struct e1000_mbx_info {
52622- struct e1000_mbx_operations ops;
52623+ e1000_mbx_operations_no_const ops;
52624 struct e1000_mbx_stats stats;
52625 u32 timeout;
52626 u32 usec_delay;
52627diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
52628index aa7286b..a61394f 100644
52629--- a/drivers/net/iseries_veth.c
52630+++ b/drivers/net/iseries_veth.c
52631@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
52632 NULL
52633 };
52634
52635-static struct sysfs_ops veth_cnx_sysfs_ops = {
52636+static const struct sysfs_ops veth_cnx_sysfs_ops = {
52637 .show = veth_cnx_attribute_show
52638 };
52639
52640@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
52641 NULL
52642 };
52643
52644-static struct sysfs_ops veth_port_sysfs_ops = {
52645+static const struct sysfs_ops veth_port_sysfs_ops = {
52646 .show = veth_port_attribute_show
52647 };
52648
52649diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
52650index 8aa44dc..fa1e797 100644
52651--- a/drivers/net/ixgb/ixgb_main.c
52652+++ b/drivers/net/ixgb/ixgb_main.c
52653@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
52654 u32 rctl;
52655 int i;
52656
52657+ pax_track_stack();
52658+
52659 /* Check for Promiscuous and All Multicast modes */
52660
52661 rctl = IXGB_READ_REG(hw, RCTL);
52662diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
52663index af35e1d..8781785 100644
52664--- a/drivers/net/ixgb/ixgb_param.c
52665+++ b/drivers/net/ixgb/ixgb_param.c
52666@@ -260,6 +260,9 @@ void __devinit
52667 ixgb_check_options(struct ixgb_adapter *adapter)
52668 {
52669 int bd = adapter->bd_number;
52670+
52671+ pax_track_stack();
52672+
52673 if (bd >= IXGB_MAX_NIC) {
52674 printk(KERN_NOTICE
52675 "Warning: no configuration for board #%i\n", bd);
52676diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
52677index b17aa73..ed74540 100644
52678--- a/drivers/net/ixgbe/ixgbe_type.h
52679+++ b/drivers/net/ixgbe/ixgbe_type.h
52680@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
52681 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
52682 s32 (*update_checksum)(struct ixgbe_hw *);
52683 };
52684+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
52685
52686 struct ixgbe_mac_operations {
52687 s32 (*init_hw)(struct ixgbe_hw *);
52688@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
52689 /* Flow Control */
52690 s32 (*fc_enable)(struct ixgbe_hw *, s32);
52691 };
52692+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
52693
52694 struct ixgbe_phy_operations {
52695 s32 (*identify)(struct ixgbe_hw *);
52696@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
52697 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
52698 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
52699 };
52700+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
52701
52702 struct ixgbe_eeprom_info {
52703- struct ixgbe_eeprom_operations ops;
52704+ ixgbe_eeprom_operations_no_const ops;
52705 enum ixgbe_eeprom_type type;
52706 u32 semaphore_delay;
52707 u16 word_size;
52708@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
52709 };
52710
52711 struct ixgbe_mac_info {
52712- struct ixgbe_mac_operations ops;
52713+ ixgbe_mac_operations_no_const ops;
52714 enum ixgbe_mac_type type;
52715 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52716 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52717@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
52718 };
52719
52720 struct ixgbe_phy_info {
52721- struct ixgbe_phy_operations ops;
52722+ ixgbe_phy_operations_no_const ops;
52723 struct mdio_if_info mdio;
52724 enum ixgbe_phy_type type;
52725 u32 id;
52726diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
52727index 291a505..2543756 100644
52728--- a/drivers/net/mlx4/main.c
52729+++ b/drivers/net/mlx4/main.c
52730@@ -38,6 +38,7 @@
52731 #include <linux/errno.h>
52732 #include <linux/pci.h>
52733 #include <linux/dma-mapping.h>
52734+#include <linux/sched.h>
52735
52736 #include <linux/mlx4/device.h>
52737 #include <linux/mlx4/doorbell.h>
52738@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
52739 u64 icm_size;
52740 int err;
52741
52742+ pax_track_stack();
52743+
52744 err = mlx4_QUERY_FW(dev);
52745 if (err) {
52746 if (err == -EACCES)
52747diff --git a/drivers/net/niu.c b/drivers/net/niu.c
52748index 2dce134..fa5ce75 100644
52749--- a/drivers/net/niu.c
52750+++ b/drivers/net/niu.c
52751@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
52752 int i, num_irqs, err;
52753 u8 first_ldg;
52754
52755+ pax_track_stack();
52756+
52757 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
52758 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
52759 ldg_num_map[i] = first_ldg + i;
52760diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
52761index c1b3f09..97cd8c4 100644
52762--- a/drivers/net/pcnet32.c
52763+++ b/drivers/net/pcnet32.c
52764@@ -79,7 +79,7 @@ static int cards_found;
52765 /*
52766 * VLB I/O addresses
52767 */
52768-static unsigned int pcnet32_portlist[] __initdata =
52769+static unsigned int pcnet32_portlist[] __devinitdata =
52770 { 0x300, 0x320, 0x340, 0x360, 0 };
52771
52772 static int pcnet32_debug = 0;
52773@@ -267,7 +267,7 @@ struct pcnet32_private {
52774 struct sk_buff **rx_skbuff;
52775 dma_addr_t *tx_dma_addr;
52776 dma_addr_t *rx_dma_addr;
52777- struct pcnet32_access a;
52778+ struct pcnet32_access *a;
52779 spinlock_t lock; /* Guard lock */
52780 unsigned int cur_rx, cur_tx; /* The next free ring entry */
52781 unsigned int rx_ring_size; /* current rx ring size */
52782@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
52783 u16 val;
52784
52785 netif_wake_queue(dev);
52786- val = lp->a.read_csr(ioaddr, CSR3);
52787+ val = lp->a->read_csr(ioaddr, CSR3);
52788 val &= 0x00ff;
52789- lp->a.write_csr(ioaddr, CSR3, val);
52790+ lp->a->write_csr(ioaddr, CSR3, val);
52791 napi_enable(&lp->napi);
52792 }
52793
52794@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
52795 r = mii_link_ok(&lp->mii_if);
52796 } else if (lp->chip_version >= PCNET32_79C970A) {
52797 ulong ioaddr = dev->base_addr; /* card base I/O address */
52798- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
52799+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
52800 } else { /* can not detect link on really old chips */
52801 r = 1;
52802 }
52803@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
52804 pcnet32_netif_stop(dev);
52805
52806 spin_lock_irqsave(&lp->lock, flags);
52807- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52808+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52809
52810 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
52811
52812@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
52813 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52814 {
52815 struct pcnet32_private *lp = netdev_priv(dev);
52816- struct pcnet32_access *a = &lp->a; /* access to registers */
52817+ struct pcnet32_access *a = lp->a; /* access to registers */
52818 ulong ioaddr = dev->base_addr; /* card base I/O address */
52819 struct sk_buff *skb; /* sk buff */
52820 int x, i; /* counters */
52821@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52822 pcnet32_netif_stop(dev);
52823
52824 spin_lock_irqsave(&lp->lock, flags);
52825- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52826+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52827
52828 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
52829
52830 /* Reset the PCNET32 */
52831- lp->a.reset(ioaddr);
52832- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52833+ lp->a->reset(ioaddr);
52834+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52835
52836 /* switch pcnet32 to 32bit mode */
52837- lp->a.write_bcr(ioaddr, 20, 2);
52838+ lp->a->write_bcr(ioaddr, 20, 2);
52839
52840 /* purge & init rings but don't actually restart */
52841 pcnet32_restart(dev, 0x0000);
52842
52843- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52844+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52845
52846 /* Initialize Transmit buffers. */
52847 size = data_len + 15;
52848@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52849
52850 /* set int loopback in CSR15 */
52851 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
52852- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
52853+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
52854
52855 teststatus = cpu_to_le16(0x8000);
52856- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52857+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52858
52859 /* Check status of descriptors */
52860 for (x = 0; x < numbuffs; x++) {
52861@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52862 }
52863 }
52864
52865- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52866+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52867 wmb();
52868 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
52869 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
52870@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52871 pcnet32_restart(dev, CSR0_NORMAL);
52872 } else {
52873 pcnet32_purge_rx_ring(dev);
52874- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52875+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52876 }
52877 spin_unlock_irqrestore(&lp->lock, flags);
52878
52879@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52880 static void pcnet32_led_blink_callback(struct net_device *dev)
52881 {
52882 struct pcnet32_private *lp = netdev_priv(dev);
52883- struct pcnet32_access *a = &lp->a;
52884+ struct pcnet32_access *a = lp->a;
52885 ulong ioaddr = dev->base_addr;
52886 unsigned long flags;
52887 int i;
52888@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
52889 static int pcnet32_phys_id(struct net_device *dev, u32 data)
52890 {
52891 struct pcnet32_private *lp = netdev_priv(dev);
52892- struct pcnet32_access *a = &lp->a;
52893+ struct pcnet32_access *a = lp->a;
52894 ulong ioaddr = dev->base_addr;
52895 unsigned long flags;
52896 int i, regs[4];
52897@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
52898 {
52899 int csr5;
52900 struct pcnet32_private *lp = netdev_priv(dev);
52901- struct pcnet32_access *a = &lp->a;
52902+ struct pcnet32_access *a = lp->a;
52903 ulong ioaddr = dev->base_addr;
52904 int ticks;
52905
52906@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52907 spin_lock_irqsave(&lp->lock, flags);
52908 if (pcnet32_tx(dev)) {
52909 /* reset the chip to clear the error condition, then restart */
52910- lp->a.reset(ioaddr);
52911- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52912+ lp->a->reset(ioaddr);
52913+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52914 pcnet32_restart(dev, CSR0_START);
52915 netif_wake_queue(dev);
52916 }
52917@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52918 __napi_complete(napi);
52919
52920 /* clear interrupt masks */
52921- val = lp->a.read_csr(ioaddr, CSR3);
52922+ val = lp->a->read_csr(ioaddr, CSR3);
52923 val &= 0x00ff;
52924- lp->a.write_csr(ioaddr, CSR3, val);
52925+ lp->a->write_csr(ioaddr, CSR3, val);
52926
52927 /* Set interrupt enable. */
52928- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
52929+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
52930
52931 spin_unlock_irqrestore(&lp->lock, flags);
52932 }
52933@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52934 int i, csr0;
52935 u16 *buff = ptr;
52936 struct pcnet32_private *lp = netdev_priv(dev);
52937- struct pcnet32_access *a = &lp->a;
52938+ struct pcnet32_access *a = lp->a;
52939 ulong ioaddr = dev->base_addr;
52940 unsigned long flags;
52941
52942@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52943 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
52944 if (lp->phymask & (1 << j)) {
52945 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
52946- lp->a.write_bcr(ioaddr, 33,
52947+ lp->a->write_bcr(ioaddr, 33,
52948 (j << 5) | i);
52949- *buff++ = lp->a.read_bcr(ioaddr, 34);
52950+ *buff++ = lp->a->read_bcr(ioaddr, 34);
52951 }
52952 }
52953 }
52954@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52955 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
52956 lp->options |= PCNET32_PORT_FD;
52957
52958- lp->a = *a;
52959+ lp->a = a;
52960
52961 /* prior to register_netdev, dev->name is not yet correct */
52962 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
52963@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52964 if (lp->mii) {
52965 /* lp->phycount and lp->phymask are set to 0 by memset above */
52966
52967- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52968+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52969 /* scan for PHYs */
52970 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
52971 unsigned short id1, id2;
52972@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52973 "Found PHY %04x:%04x at address %d.\n",
52974 id1, id2, i);
52975 }
52976- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52977+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52978 if (lp->phycount > 1) {
52979 lp->options |= PCNET32_PORT_MII;
52980 }
52981@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
52982 }
52983
52984 /* Reset the PCNET32 */
52985- lp->a.reset(ioaddr);
52986+ lp->a->reset(ioaddr);
52987
52988 /* switch pcnet32 to 32bit mode */
52989- lp->a.write_bcr(ioaddr, 20, 2);
52990+ lp->a->write_bcr(ioaddr, 20, 2);
52991
52992 if (netif_msg_ifup(lp))
52993 printk(KERN_DEBUG
52994@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
52995 (u32) (lp->init_dma_addr));
52996
52997 /* set/reset autoselect bit */
52998- val = lp->a.read_bcr(ioaddr, 2) & ~2;
52999+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
53000 if (lp->options & PCNET32_PORT_ASEL)
53001 val |= 2;
53002- lp->a.write_bcr(ioaddr, 2, val);
53003+ lp->a->write_bcr(ioaddr, 2, val);
53004
53005 /* handle full duplex setting */
53006 if (lp->mii_if.full_duplex) {
53007- val = lp->a.read_bcr(ioaddr, 9) & ~3;
53008+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
53009 if (lp->options & PCNET32_PORT_FD) {
53010 val |= 1;
53011 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
53012@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
53013 if (lp->chip_version == 0x2627)
53014 val |= 3;
53015 }
53016- lp->a.write_bcr(ioaddr, 9, val);
53017+ lp->a->write_bcr(ioaddr, 9, val);
53018 }
53019
53020 /* set/reset GPSI bit in test register */
53021- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
53022+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
53023 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
53024 val |= 0x10;
53025- lp->a.write_csr(ioaddr, 124, val);
53026+ lp->a->write_csr(ioaddr, 124, val);
53027
53028 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
53029 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
53030@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
53031 * duplex, and/or enable auto negotiation, and clear DANAS
53032 */
53033 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
53034- lp->a.write_bcr(ioaddr, 32,
53035- lp->a.read_bcr(ioaddr, 32) | 0x0080);
53036+ lp->a->write_bcr(ioaddr, 32,
53037+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
53038 /* disable Auto Negotiation, set 10Mpbs, HD */
53039- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
53040+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
53041 if (lp->options & PCNET32_PORT_FD)
53042 val |= 0x10;
53043 if (lp->options & PCNET32_PORT_100)
53044 val |= 0x08;
53045- lp->a.write_bcr(ioaddr, 32, val);
53046+ lp->a->write_bcr(ioaddr, 32, val);
53047 } else {
53048 if (lp->options & PCNET32_PORT_ASEL) {
53049- lp->a.write_bcr(ioaddr, 32,
53050- lp->a.read_bcr(ioaddr,
53051+ lp->a->write_bcr(ioaddr, 32,
53052+ lp->a->read_bcr(ioaddr,
53053 32) | 0x0080);
53054 /* enable auto negotiate, setup, disable fd */
53055- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
53056+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
53057 val |= 0x20;
53058- lp->a.write_bcr(ioaddr, 32, val);
53059+ lp->a->write_bcr(ioaddr, 32, val);
53060 }
53061 }
53062 } else {
53063@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
53064 * There is really no good other way to handle multiple PHYs
53065 * other than turning off all automatics
53066 */
53067- val = lp->a.read_bcr(ioaddr, 2);
53068- lp->a.write_bcr(ioaddr, 2, val & ~2);
53069- val = lp->a.read_bcr(ioaddr, 32);
53070- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53071+ val = lp->a->read_bcr(ioaddr, 2);
53072+ lp->a->write_bcr(ioaddr, 2, val & ~2);
53073+ val = lp->a->read_bcr(ioaddr, 32);
53074+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53075
53076 if (!(lp->options & PCNET32_PORT_ASEL)) {
53077 /* setup ecmd */
53078@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
53079 ecmd.speed =
53080 lp->
53081 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
53082- bcr9 = lp->a.read_bcr(ioaddr, 9);
53083+ bcr9 = lp->a->read_bcr(ioaddr, 9);
53084
53085 if (lp->options & PCNET32_PORT_FD) {
53086 ecmd.duplex = DUPLEX_FULL;
53087@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
53088 ecmd.duplex = DUPLEX_HALF;
53089 bcr9 |= ~(1 << 0);
53090 }
53091- lp->a.write_bcr(ioaddr, 9, bcr9);
53092+ lp->a->write_bcr(ioaddr, 9, bcr9);
53093 }
53094
53095 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
53096@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
53097
53098 #ifdef DO_DXSUFLO
53099 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
53100- val = lp->a.read_csr(ioaddr, CSR3);
53101+ val = lp->a->read_csr(ioaddr, CSR3);
53102 val |= 0x40;
53103- lp->a.write_csr(ioaddr, CSR3, val);
53104+ lp->a->write_csr(ioaddr, CSR3, val);
53105 }
53106 #endif
53107
53108@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
53109 napi_enable(&lp->napi);
53110
53111 /* Re-initialize the PCNET32, and start it when done. */
53112- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53113- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53114+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53115+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53116
53117- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53118- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53119+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53120+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53121
53122 netif_start_queue(dev);
53123
53124@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
53125
53126 i = 0;
53127 while (i++ < 100)
53128- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53129+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53130 break;
53131 /*
53132 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
53133 * reports that doing so triggers a bug in the '974.
53134 */
53135- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
53136+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
53137
53138 if (netif_msg_ifup(lp))
53139 printk(KERN_DEBUG
53140 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
53141 dev->name, i,
53142 (u32) (lp->init_dma_addr),
53143- lp->a.read_csr(ioaddr, CSR0));
53144+ lp->a->read_csr(ioaddr, CSR0));
53145
53146 spin_unlock_irqrestore(&lp->lock, flags);
53147
53148@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
53149 * Switch back to 16bit mode to avoid problems with dumb
53150 * DOS packet driver after a warm reboot
53151 */
53152- lp->a.write_bcr(ioaddr, 20, 4);
53153+ lp->a->write_bcr(ioaddr, 20, 4);
53154
53155 err_free_irq:
53156 spin_unlock_irqrestore(&lp->lock, flags);
53157@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53158
53159 /* wait for stop */
53160 for (i = 0; i < 100; i++)
53161- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
53162+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
53163 break;
53164
53165 if (i >= 100 && netif_msg_drv(lp))
53166@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53167 return;
53168
53169 /* ReInit Ring */
53170- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53171+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53172 i = 0;
53173 while (i++ < 1000)
53174- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53175+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53176 break;
53177
53178- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
53179+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
53180 }
53181
53182 static void pcnet32_tx_timeout(struct net_device *dev)
53183@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
53184 if (pcnet32_debug & NETIF_MSG_DRV)
53185 printk(KERN_ERR
53186 "%s: transmit timed out, status %4.4x, resetting.\n",
53187- dev->name, lp->a.read_csr(ioaddr, CSR0));
53188- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53189+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53190+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53191 dev->stats.tx_errors++;
53192 if (netif_msg_tx_err(lp)) {
53193 int i;
53194@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53195 if (netif_msg_tx_queued(lp)) {
53196 printk(KERN_DEBUG
53197 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
53198- dev->name, lp->a.read_csr(ioaddr, CSR0));
53199+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53200 }
53201
53202 /* Default status -- will not enable Successful-TxDone
53203@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53204 dev->stats.tx_bytes += skb->len;
53205
53206 /* Trigger an immediate send poll. */
53207- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53208+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53209
53210 dev->trans_start = jiffies;
53211
53212@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
53213
53214 spin_lock(&lp->lock);
53215
53216- csr0 = lp->a.read_csr(ioaddr, CSR0);
53217+ csr0 = lp->a->read_csr(ioaddr, CSR0);
53218 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
53219 if (csr0 == 0xffff) {
53220 break; /* PCMCIA remove happened */
53221 }
53222 /* Acknowledge all of the current interrupt sources ASAP. */
53223- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53224+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53225
53226 if (netif_msg_intr(lp))
53227 printk(KERN_DEBUG
53228 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
53229- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
53230+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
53231
53232 /* Log misc errors. */
53233 if (csr0 & 0x4000)
53234@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
53235 if (napi_schedule_prep(&lp->napi)) {
53236 u16 val;
53237 /* set interrupt masks */
53238- val = lp->a.read_csr(ioaddr, CSR3);
53239+ val = lp->a->read_csr(ioaddr, CSR3);
53240 val |= 0x5f00;
53241- lp->a.write_csr(ioaddr, CSR3, val);
53242+ lp->a->write_csr(ioaddr, CSR3, val);
53243
53244 __napi_schedule(&lp->napi);
53245 break;
53246 }
53247- csr0 = lp->a.read_csr(ioaddr, CSR0);
53248+ csr0 = lp->a->read_csr(ioaddr, CSR0);
53249 }
53250
53251 if (netif_msg_intr(lp))
53252 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
53253- dev->name, lp->a.read_csr(ioaddr, CSR0));
53254+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53255
53256 spin_unlock(&lp->lock);
53257
53258@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
53259
53260 spin_lock_irqsave(&lp->lock, flags);
53261
53262- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53263+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53264
53265 if (netif_msg_ifdown(lp))
53266 printk(KERN_DEBUG
53267 "%s: Shutting down ethercard, status was %2.2x.\n",
53268- dev->name, lp->a.read_csr(ioaddr, CSR0));
53269+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53270
53271 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
53272- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53273+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53274
53275 /*
53276 * Switch back to 16bit mode to avoid problems with dumb
53277 * DOS packet driver after a warm reboot
53278 */
53279- lp->a.write_bcr(ioaddr, 20, 4);
53280+ lp->a->write_bcr(ioaddr, 20, 4);
53281
53282 spin_unlock_irqrestore(&lp->lock, flags);
53283
53284@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
53285 unsigned long flags;
53286
53287 spin_lock_irqsave(&lp->lock, flags);
53288- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53289+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53290 spin_unlock_irqrestore(&lp->lock, flags);
53291
53292 return &dev->stats;
53293@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
53294 if (dev->flags & IFF_ALLMULTI) {
53295 ib->filter[0] = cpu_to_le32(~0U);
53296 ib->filter[1] = cpu_to_le32(~0U);
53297- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53298- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53299- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53300- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53301+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53302+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53303+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53304+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53305 return;
53306 }
53307 /* clear the multicast filter */
53308@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
53309 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
53310 }
53311 for (i = 0; i < 4; i++)
53312- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
53313+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
53314 le16_to_cpu(mcast_table[i]));
53315 return;
53316 }
53317@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53318
53319 spin_lock_irqsave(&lp->lock, flags);
53320 suspended = pcnet32_suspend(dev, &flags, 0);
53321- csr15 = lp->a.read_csr(ioaddr, CSR15);
53322+ csr15 = lp->a->read_csr(ioaddr, CSR15);
53323 if (dev->flags & IFF_PROMISC) {
53324 /* Log any net taps. */
53325 if (netif_msg_hw(lp))
53326@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53327 lp->init_block->mode =
53328 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
53329 7);
53330- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
53331+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
53332 } else {
53333 lp->init_block->mode =
53334 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
53335- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53336+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53337 pcnet32_load_multicast(dev);
53338 }
53339
53340 if (suspended) {
53341 int csr5;
53342 /* clear SUSPEND (SPND) - CSR5 bit 0 */
53343- csr5 = lp->a.read_csr(ioaddr, CSR5);
53344- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53345+ csr5 = lp->a->read_csr(ioaddr, CSR5);
53346+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53347 } else {
53348- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53349+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53350 pcnet32_restart(dev, CSR0_NORMAL);
53351 netif_wake_queue(dev);
53352 }
53353@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
53354 if (!lp->mii)
53355 return 0;
53356
53357- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53358- val_out = lp->a.read_bcr(ioaddr, 34);
53359+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53360+ val_out = lp->a->read_bcr(ioaddr, 34);
53361
53362 return val_out;
53363 }
53364@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
53365 if (!lp->mii)
53366 return;
53367
53368- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53369- lp->a.write_bcr(ioaddr, 34, val);
53370+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53371+ lp->a->write_bcr(ioaddr, 34, val);
53372 }
53373
53374 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53375@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53376 curr_link = mii_link_ok(&lp->mii_if);
53377 } else {
53378 ulong ioaddr = dev->base_addr; /* card base I/O address */
53379- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
53380+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
53381 }
53382 if (!curr_link) {
53383 if (prev_link || verbose) {
53384@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53385 (ecmd.duplex ==
53386 DUPLEX_FULL) ? "full" : "half");
53387 }
53388- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
53389+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
53390 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
53391 if (lp->mii_if.full_duplex)
53392 bcr9 |= (1 << 0);
53393 else
53394 bcr9 &= ~(1 << 0);
53395- lp->a.write_bcr(dev->base_addr, 9, bcr9);
53396+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
53397 }
53398 } else {
53399 if (netif_msg_link(lp))
53400diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
53401index 7cc9898..6eb50d3 100644
53402--- a/drivers/net/sis190.c
53403+++ b/drivers/net/sis190.c
53404@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
53405 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
53406 struct net_device *dev)
53407 {
53408- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
53409+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
53410 struct sis190_private *tp = netdev_priv(dev);
53411 struct pci_dev *isa_bridge;
53412 u8 reg, tmp8;
53413diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
53414index e13685a..60c948c 100644
53415--- a/drivers/net/sundance.c
53416+++ b/drivers/net/sundance.c
53417@@ -225,7 +225,7 @@ enum {
53418 struct pci_id_info {
53419 const char *name;
53420 };
53421-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53422+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53423 {"D-Link DFE-550TX FAST Ethernet Adapter"},
53424 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
53425 {"D-Link DFE-580TX 4 port Server Adapter"},
53426diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
53427index 529f55a..cccaa18 100644
53428--- a/drivers/net/tg3.h
53429+++ b/drivers/net/tg3.h
53430@@ -95,6 +95,7 @@
53431 #define CHIPREV_ID_5750_A0 0x4000
53432 #define CHIPREV_ID_5750_A1 0x4001
53433 #define CHIPREV_ID_5750_A3 0x4003
53434+#define CHIPREV_ID_5750_C1 0x4201
53435 #define CHIPREV_ID_5750_C2 0x4202
53436 #define CHIPREV_ID_5752_A0_HW 0x5000
53437 #define CHIPREV_ID_5752_A0 0x6000
53438diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
53439index b9db1b5..720f9ce 100644
53440--- a/drivers/net/tokenring/abyss.c
53441+++ b/drivers/net/tokenring/abyss.c
53442@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
53443
53444 static int __init abyss_init (void)
53445 {
53446- abyss_netdev_ops = tms380tr_netdev_ops;
53447+ pax_open_kernel();
53448+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53449
53450- abyss_netdev_ops.ndo_open = abyss_open;
53451- abyss_netdev_ops.ndo_stop = abyss_close;
53452+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
53453+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
53454+ pax_close_kernel();
53455
53456 return pci_register_driver(&abyss_driver);
53457 }
53458diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
53459index 456f8bf..373e56d 100644
53460--- a/drivers/net/tokenring/madgemc.c
53461+++ b/drivers/net/tokenring/madgemc.c
53462@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
53463
53464 static int __init madgemc_init (void)
53465 {
53466- madgemc_netdev_ops = tms380tr_netdev_ops;
53467- madgemc_netdev_ops.ndo_open = madgemc_open;
53468- madgemc_netdev_ops.ndo_stop = madgemc_close;
53469+ pax_open_kernel();
53470+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53471+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
53472+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
53473+ pax_close_kernel();
53474
53475 return mca_register_driver (&madgemc_driver);
53476 }
53477diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
53478index 16e8783..925bd49 100644
53479--- a/drivers/net/tokenring/proteon.c
53480+++ b/drivers/net/tokenring/proteon.c
53481@@ -353,9 +353,11 @@ static int __init proteon_init(void)
53482 struct platform_device *pdev;
53483 int i, num = 0, err = 0;
53484
53485- proteon_netdev_ops = tms380tr_netdev_ops;
53486- proteon_netdev_ops.ndo_open = proteon_open;
53487- proteon_netdev_ops.ndo_stop = tms380tr_close;
53488+ pax_open_kernel();
53489+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53490+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
53491+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
53492+ pax_close_kernel();
53493
53494 err = platform_driver_register(&proteon_driver);
53495 if (err)
53496diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
53497index 46db5c5..37c1536 100644
53498--- a/drivers/net/tokenring/skisa.c
53499+++ b/drivers/net/tokenring/skisa.c
53500@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
53501 struct platform_device *pdev;
53502 int i, num = 0, err = 0;
53503
53504- sk_isa_netdev_ops = tms380tr_netdev_ops;
53505- sk_isa_netdev_ops.ndo_open = sk_isa_open;
53506- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53507+ pax_open_kernel();
53508+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53509+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
53510+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53511+ pax_close_kernel();
53512
53513 err = platform_driver_register(&sk_isa_driver);
53514 if (err)
53515diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
53516index 74e5ba4..5cf6bc9 100644
53517--- a/drivers/net/tulip/de2104x.c
53518+++ b/drivers/net/tulip/de2104x.c
53519@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
53520 struct de_srom_info_leaf *il;
53521 void *bufp;
53522
53523+ pax_track_stack();
53524+
53525 /* download entire eeprom */
53526 for (i = 0; i < DE_EEPROM_WORDS; i++)
53527 ((__le16 *)ee_data)[i] =
53528diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
53529index a8349b7..90f9dfe 100644
53530--- a/drivers/net/tulip/de4x5.c
53531+++ b/drivers/net/tulip/de4x5.c
53532@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53533 for (i=0; i<ETH_ALEN; i++) {
53534 tmp.addr[i] = dev->dev_addr[i];
53535 }
53536- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53537+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53538 break;
53539
53540 case DE4X5_SET_HWADDR: /* Set the hardware address */
53541@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53542 spin_lock_irqsave(&lp->lock, flags);
53543 memcpy(&statbuf, &lp->pktStats, ioc->len);
53544 spin_unlock_irqrestore(&lp->lock, flags);
53545- if (copy_to_user(ioc->data, &statbuf, ioc->len))
53546+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
53547 return -EFAULT;
53548 break;
53549 }
53550diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
53551index 391acd3..56d11cd 100644
53552--- a/drivers/net/tulip/eeprom.c
53553+++ b/drivers/net/tulip/eeprom.c
53554@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
53555 {NULL}};
53556
53557
53558-static const char *block_name[] __devinitdata = {
53559+static const char *block_name[] __devinitconst = {
53560 "21140 non-MII",
53561 "21140 MII PHY",
53562 "21142 Serial PHY",
53563diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
53564index b38d3b7..b1cff23 100644
53565--- a/drivers/net/tulip/winbond-840.c
53566+++ b/drivers/net/tulip/winbond-840.c
53567@@ -235,7 +235,7 @@ struct pci_id_info {
53568 int drv_flags; /* Driver use, intended as capability flags. */
53569 };
53570
53571-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53572+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53573 { /* Sometime a Level-One switch card. */
53574 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
53575 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
53576diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
53577index f450bc9..2b747c8 100644
53578--- a/drivers/net/usb/hso.c
53579+++ b/drivers/net/usb/hso.c
53580@@ -71,7 +71,7 @@
53581 #include <asm/byteorder.h>
53582 #include <linux/serial_core.h>
53583 #include <linux/serial.h>
53584-
53585+#include <asm/local.h>
53586
53587 #define DRIVER_VERSION "1.2"
53588 #define MOD_AUTHOR "Option Wireless"
53589@@ -258,7 +258,7 @@ struct hso_serial {
53590
53591 /* from usb_serial_port */
53592 struct tty_struct *tty;
53593- int open_count;
53594+ local_t open_count;
53595 spinlock_t serial_lock;
53596
53597 int (*write_data) (struct hso_serial *serial);
53598@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
53599 struct urb *urb;
53600
53601 urb = serial->rx_urb[0];
53602- if (serial->open_count > 0) {
53603+ if (local_read(&serial->open_count) > 0) {
53604 count = put_rxbuf_data(urb, serial);
53605 if (count == -1)
53606 return;
53607@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
53608 DUMP1(urb->transfer_buffer, urb->actual_length);
53609
53610 /* Anyone listening? */
53611- if (serial->open_count == 0)
53612+ if (local_read(&serial->open_count) == 0)
53613 return;
53614
53615 if (status == 0) {
53616@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53617 spin_unlock_irq(&serial->serial_lock);
53618
53619 /* check for port already opened, if not set the termios */
53620- serial->open_count++;
53621- if (serial->open_count == 1) {
53622+ if (local_inc_return(&serial->open_count) == 1) {
53623 tty->low_latency = 1;
53624 serial->rx_state = RX_IDLE;
53625 /* Force default termio settings */
53626@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53627 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
53628 if (result) {
53629 hso_stop_serial_device(serial->parent);
53630- serial->open_count--;
53631+ local_dec(&serial->open_count);
53632 kref_put(&serial->parent->ref, hso_serial_ref_free);
53633 }
53634 } else {
53635@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
53636
53637 /* reset the rts and dtr */
53638 /* do the actual close */
53639- serial->open_count--;
53640+ local_dec(&serial->open_count);
53641
53642- if (serial->open_count <= 0) {
53643- serial->open_count = 0;
53644+ if (local_read(&serial->open_count) <= 0) {
53645+ local_set(&serial->open_count, 0);
53646 spin_lock_irq(&serial->serial_lock);
53647 if (serial->tty == tty) {
53648 serial->tty->driver_data = NULL;
53649@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
53650
53651 /* the actual setup */
53652 spin_lock_irqsave(&serial->serial_lock, flags);
53653- if (serial->open_count)
53654+ if (local_read(&serial->open_count))
53655 _hso_serial_set_termios(tty, old);
53656 else
53657 tty->termios = old;
53658@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
53659 /* Start all serial ports */
53660 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
53661 if (serial_table[i] && (serial_table[i]->interface == iface)) {
53662- if (dev2ser(serial_table[i])->open_count) {
53663+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
53664 result =
53665 hso_start_serial_device(serial_table[i], GFP_NOIO);
53666 hso_kick_transmit(dev2ser(serial_table[i]));
53667diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
53668index 3e94f0c..ffdd926 100644
53669--- a/drivers/net/vxge/vxge-config.h
53670+++ b/drivers/net/vxge/vxge-config.h
53671@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
53672 void (*link_down)(struct __vxge_hw_device *devh);
53673 void (*crit_err)(struct __vxge_hw_device *devh,
53674 enum vxge_hw_event type, u64 ext_data);
53675-};
53676+} __no_const;
53677
53678 /*
53679 * struct __vxge_hw_blockpool_entry - Block private data structure
53680diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
53681index 068d7a9..35293de 100644
53682--- a/drivers/net/vxge/vxge-main.c
53683+++ b/drivers/net/vxge/vxge-main.c
53684@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
53685 struct sk_buff *completed[NR_SKB_COMPLETED];
53686 int more;
53687
53688+ pax_track_stack();
53689+
53690 do {
53691 more = 0;
53692 skb_ptr = completed;
53693@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
53694 u8 mtable[256] = {0}; /* CPU to vpath mapping */
53695 int index;
53696
53697+ pax_track_stack();
53698+
53699 /*
53700 * Filling
53701 * - itable with bucket numbers
53702diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
53703index 461742b..81be42e 100644
53704--- a/drivers/net/vxge/vxge-traffic.h
53705+++ b/drivers/net/vxge/vxge-traffic.h
53706@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
53707 struct vxge_hw_mempool_dma *dma_object,
53708 u32 index,
53709 u32 is_last);
53710-};
53711+} __no_const;
53712
53713 void
53714 __vxge_hw_mempool_destroy(
53715diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
53716index cd8cb95..4153b79 100644
53717--- a/drivers/net/wan/cycx_x25.c
53718+++ b/drivers/net/wan/cycx_x25.c
53719@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
53720 unsigned char hex[1024],
53721 * phex = hex;
53722
53723+ pax_track_stack();
53724+
53725 if (len >= (sizeof(hex) / 2))
53726 len = (sizeof(hex) / 2) - 1;
53727
53728diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
53729index aa9248f..a4e3c3b 100644
53730--- a/drivers/net/wan/hdlc_x25.c
53731+++ b/drivers/net/wan/hdlc_x25.c
53732@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
53733
53734 static int x25_open(struct net_device *dev)
53735 {
53736- struct lapb_register_struct cb;
53737+ static struct lapb_register_struct cb = {
53738+ .connect_confirmation = x25_connected,
53739+ .connect_indication = x25_connected,
53740+ .disconnect_confirmation = x25_disconnected,
53741+ .disconnect_indication = x25_disconnected,
53742+ .data_indication = x25_data_indication,
53743+ .data_transmit = x25_data_transmit
53744+ };
53745 int result;
53746
53747- cb.connect_confirmation = x25_connected;
53748- cb.connect_indication = x25_connected;
53749- cb.disconnect_confirmation = x25_disconnected;
53750- cb.disconnect_indication = x25_disconnected;
53751- cb.data_indication = x25_data_indication;
53752- cb.data_transmit = x25_data_transmit;
53753-
53754 result = lapb_register(dev, &cb);
53755 if (result != LAPB_OK)
53756 return result;
53757diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
53758index 5ad287c..783b020 100644
53759--- a/drivers/net/wimax/i2400m/usb-fw.c
53760+++ b/drivers/net/wimax/i2400m/usb-fw.c
53761@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
53762 int do_autopm = 1;
53763 DECLARE_COMPLETION_ONSTACK(notif_completion);
53764
53765+ pax_track_stack();
53766+
53767 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
53768 i2400m, ack, ack_size);
53769 BUG_ON(_ack == i2400m->bm_ack_buf);
53770diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
53771index 6c26840..62c97c3 100644
53772--- a/drivers/net/wireless/airo.c
53773+++ b/drivers/net/wireless/airo.c
53774@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
53775 BSSListElement * loop_net;
53776 BSSListElement * tmp_net;
53777
53778+ pax_track_stack();
53779+
53780 /* Blow away current list of scan results */
53781 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
53782 list_move_tail (&loop_net->list, &ai->network_free_list);
53783@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
53784 WepKeyRid wkr;
53785 int rc;
53786
53787+ pax_track_stack();
53788+
53789 memset( &mySsid, 0, sizeof( mySsid ) );
53790 kfree (ai->flash);
53791 ai->flash = NULL;
53792@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
53793 __le32 *vals = stats.vals;
53794 int len;
53795
53796+ pax_track_stack();
53797+
53798 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53799 return -ENOMEM;
53800 data = (struct proc_data *)file->private_data;
53801@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
53802 /* If doLoseSync is not 1, we won't do a Lose Sync */
53803 int doLoseSync = -1;
53804
53805+ pax_track_stack();
53806+
53807 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53808 return -ENOMEM;
53809 data = (struct proc_data *)file->private_data;
53810@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
53811 int i;
53812 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
53813
53814+ pax_track_stack();
53815+
53816 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
53817 if (!qual)
53818 return -ENOMEM;
53819@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
53820 CapabilityRid cap_rid;
53821 __le32 *vals = stats_rid.vals;
53822
53823+ pax_track_stack();
53824+
53825 /* Get stats out of the card */
53826 clear_bit(JOB_WSTATS, &local->jobs);
53827 if (local->power.event) {
53828diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
53829index 747508c..c36cb08 100644
53830--- a/drivers/net/wireless/ath/ath5k/debug.c
53831+++ b/drivers/net/wireless/ath/ath5k/debug.c
53832@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
53833 unsigned int v;
53834 u64 tsf;
53835
53836+ pax_track_stack();
53837+
53838 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
53839 len += snprintf(buf+len, sizeof(buf)-len,
53840 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
53841@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53842 unsigned int len = 0;
53843 unsigned int i;
53844
53845+ pax_track_stack();
53846+
53847 len += snprintf(buf+len, sizeof(buf)-len,
53848 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
53849
53850@@ -337,6 +341,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53851
53852 static ssize_t write_file_debug(struct file *file,
53853 const char __user *userbuf,
53854+ size_t count, loff_t *ppos) __size_overflow(3);
53855+static ssize_t write_file_debug(struct file *file,
53856+ const char __user *userbuf,
53857 size_t count, loff_t *ppos)
53858 {
53859 struct ath5k_softc *sc = file->private_data;
53860diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
53861index 2be4c22..a8ad784 100644
53862--- a/drivers/net/wireless/ath/ath9k/debug.c
53863+++ b/drivers/net/wireless/ath/ath9k/debug.c
53864@@ -56,6 +56,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53865 }
53866
53867 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53868+ size_t count, loff_t *ppos) __size_overflow(3);
53869+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53870 size_t count, loff_t *ppos)
53871 {
53872 struct ath_softc *sc = file->private_data;
53873@@ -220,6 +222,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
53874 char buf[512];
53875 unsigned int len = 0;
53876
53877+ pax_track_stack();
53878+
53879 len += snprintf(buf + len, sizeof(buf) - len,
53880 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
53881 len += snprintf(buf + len, sizeof(buf) - len,
53882@@ -360,6 +364,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
53883 int i;
53884 u8 addr[ETH_ALEN];
53885
53886+ pax_track_stack();
53887+
53888 len += snprintf(buf + len, sizeof(buf) - len,
53889 "primary: %s (%s chan=%d ht=%d)\n",
53890 wiphy_name(sc->pri_wiphy->hw->wiphy),
53891diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
53892index 80b19a4..dab3a45 100644
53893--- a/drivers/net/wireless/b43/debugfs.c
53894+++ b/drivers/net/wireless/b43/debugfs.c
53895@@ -43,7 +43,7 @@ static struct dentry *rootdir;
53896 struct b43_debugfs_fops {
53897 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
53898 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
53899- struct file_operations fops;
53900+ const struct file_operations fops;
53901 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
53902 size_t file_struct_offset;
53903 };
53904diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
53905index 1f85ac5..c99b4b4 100644
53906--- a/drivers/net/wireless/b43legacy/debugfs.c
53907+++ b/drivers/net/wireless/b43legacy/debugfs.c
53908@@ -44,7 +44,7 @@ static struct dentry *rootdir;
53909 struct b43legacy_debugfs_fops {
53910 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
53911 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
53912- struct file_operations fops;
53913+ const struct file_operations fops;
53914 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
53915 size_t file_struct_offset;
53916 /* Take wl->irq_lock before calling read/write? */
53917diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
53918index 43102bf..3b569c3 100644
53919--- a/drivers/net/wireless/ipw2x00/ipw2100.c
53920+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
53921@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
53922 int err;
53923 DECLARE_SSID_BUF(ssid);
53924
53925+ pax_track_stack();
53926+
53927 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
53928
53929 if (ssid_len)
53930@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
53931 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
53932 int err;
53933
53934+ pax_track_stack();
53935+
53936 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
53937 idx, keylen, len);
53938
53939diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
53940index 282b1f7..169f0cf 100644
53941--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
53942+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
53943@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
53944 unsigned long flags;
53945 DECLARE_SSID_BUF(ssid);
53946
53947+ pax_track_stack();
53948+
53949 LIBIPW_DEBUG_SCAN("'%s' (%pM"
53950 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
53951 print_ssid(ssid, info_element->data, info_element->len),
53952diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
53953index 950267a..80d5fd2 100644
53954--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
53955+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
53956@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
53957 },
53958 };
53959
53960-static struct iwl_ops iwl1000_ops = {
53961+static const struct iwl_ops iwl1000_ops = {
53962 .ucode = &iwl5000_ucode,
53963 .lib = &iwl1000_lib,
53964 .hcmd = &iwl5000_hcmd,
53965diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
53966index 56bfcc3..b348020 100644
53967--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
53968+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
53969@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
53970 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
53971 };
53972
53973-static struct iwl_ops iwl3945_ops = {
53974+static const struct iwl_ops iwl3945_ops = {
53975 .ucode = &iwl3945_ucode,
53976 .lib = &iwl3945_lib,
53977 .hcmd = &iwl3945_hcmd,
53978diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
53979index 585b8d4..e142963 100644
53980--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
53981+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
53982@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
53983 },
53984 };
53985
53986-static struct iwl_ops iwl4965_ops = {
53987+static const struct iwl_ops iwl4965_ops = {
53988 .ucode = &iwl4965_ucode,
53989 .lib = &iwl4965_lib,
53990 .hcmd = &iwl4965_hcmd,
53991diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
53992index 1f423f2..e37c192 100644
53993--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
53994+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
53995@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
53996 },
53997 };
53998
53999-struct iwl_ops iwl5000_ops = {
54000+const struct iwl_ops iwl5000_ops = {
54001 .ucode = &iwl5000_ucode,
54002 .lib = &iwl5000_lib,
54003 .hcmd = &iwl5000_hcmd,
54004 .utils = &iwl5000_hcmd_utils,
54005 };
54006
54007-static struct iwl_ops iwl5150_ops = {
54008+static const struct iwl_ops iwl5150_ops = {
54009 .ucode = &iwl5000_ucode,
54010 .lib = &iwl5150_lib,
54011 .hcmd = &iwl5000_hcmd,
54012diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
54013index 1473452..f07d5e1 100644
54014--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
54015+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
54016@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
54017 .calc_rssi = iwl5000_calc_rssi,
54018 };
54019
54020-static struct iwl_ops iwl6000_ops = {
54021+static const struct iwl_ops iwl6000_ops = {
54022 .ucode = &iwl5000_ucode,
54023 .lib = &iwl6000_lib,
54024 .hcmd = &iwl5000_hcmd,
54025diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54026index 1a3dfa2..b3e0a61 100644
54027--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54028+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54029@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
54030 u8 active_index = 0;
54031 s32 tpt = 0;
54032
54033+ pax_track_stack();
54034+
54035 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
54036
54037 if (!ieee80211_is_data(hdr->frame_control) ||
54038@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
54039 u8 valid_tx_ant = 0;
54040 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
54041
54042+ pax_track_stack();
54043+
54044 /* Override starting rate (index 0) if needed for debug purposes */
54045 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
54046
54047diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
54048index 0e56d78..6a3c107 100644
54049--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
54050+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
54051@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
54052 if (iwl_debug_level & IWL_DL_INFO)
54053 dev_printk(KERN_DEBUG, &(pdev->dev),
54054 "Disabling hw_scan\n");
54055- iwl_hw_ops.hw_scan = NULL;
54056+ pax_open_kernel();
54057+ *(void **)&iwl_hw_ops.hw_scan = NULL;
54058+ pax_close_kernel();
54059 }
54060
54061 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
54062diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
54063index cbc6290..eb323d7 100644
54064--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
54065+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
54066@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
54067 #endif
54068
54069 #else
54070-#define IWL_DEBUG(__priv, level, fmt, args...)
54071-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
54072+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
54073+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
54074 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
54075 void *p, u32 len)
54076 {}
54077diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54078index a198bcf..8e68233 100644
54079--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54080+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54081@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
54082 int pos = 0;
54083 const size_t bufsz = sizeof(buf);
54084
54085+ pax_track_stack();
54086+
54087 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
54088 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
54089 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
54090@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
54091 const size_t bufsz = sizeof(buf);
54092 ssize_t ret;
54093
54094+ pax_track_stack();
54095+
54096 for (i = 0; i < AC_NUM; i++) {
54097 pos += scnprintf(buf + pos, bufsz - pos,
54098 "\tcw_min\tcw_max\taifsn\ttxop\n");
54099diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
54100index 3539ea4..b174bfa 100644
54101--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
54102+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
54103@@ -68,7 +68,7 @@ struct iwl_tx_queue;
54104
54105 /* shared structures from iwl-5000.c */
54106 extern struct iwl_mod_params iwl50_mod_params;
54107-extern struct iwl_ops iwl5000_ops;
54108+extern const struct iwl_ops iwl5000_ops;
54109 extern struct iwl_ucode_ops iwl5000_ucode;
54110 extern struct iwl_lib_ops iwl5000_lib;
54111 extern struct iwl_hcmd_ops iwl5000_hcmd;
54112diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54113index 619590d..69235ee 100644
54114--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
54115+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54116@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
54117 */
54118 if (iwl3945_mod_params.disable_hw_scan) {
54119 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
54120- iwl3945_hw_ops.hw_scan = NULL;
54121+ pax_open_kernel();
54122+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
54123+ pax_close_kernel();
54124 }
54125
54126
54127diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54128index 1465379..fe4d78b 100644
54129--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
54130+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54131@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
54132 int buf_len = 512;
54133 size_t len = 0;
54134
54135+ pax_track_stack();
54136+
54137 if (*ppos != 0)
54138 return 0;
54139 if (count < sizeof(buf))
54140diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
54141index 893a55c..7f66a50 100644
54142--- a/drivers/net/wireless/libertas/debugfs.c
54143+++ b/drivers/net/wireless/libertas/debugfs.c
54144@@ -708,7 +708,7 @@ out_unlock:
54145 struct lbs_debugfs_files {
54146 const char *name;
54147 int perm;
54148- struct file_operations fops;
54149+ const struct file_operations fops;
54150 };
54151
54152 static const struct lbs_debugfs_files debugfs_files[] = {
54153diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
54154index 2ecbedb..42704f0 100644
54155--- a/drivers/net/wireless/rndis_wlan.c
54156+++ b/drivers/net/wireless/rndis_wlan.c
54157@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
54158
54159 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
54160
54161- if (rts_threshold < 0 || rts_threshold > 2347)
54162+ if (rts_threshold > 2347)
54163 rts_threshold = 2347;
54164
54165 tmp = cpu_to_le32(rts_threshold);
54166diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
54167index 334ccd6..47f8944 100644
54168--- a/drivers/oprofile/buffer_sync.c
54169+++ b/drivers/oprofile/buffer_sync.c
54170@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
54171 if (cookie == NO_COOKIE)
54172 offset = pc;
54173 if (cookie == INVALID_COOKIE) {
54174- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54175+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54176 offset = pc;
54177 }
54178 if (cookie != last_cookie) {
54179@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
54180 /* add userspace sample */
54181
54182 if (!mm) {
54183- atomic_inc(&oprofile_stats.sample_lost_no_mm);
54184+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
54185 return 0;
54186 }
54187
54188 cookie = lookup_dcookie(mm, s->eip, &offset);
54189
54190 if (cookie == INVALID_COOKIE) {
54191- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54192+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54193 return 0;
54194 }
54195
54196@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
54197 /* ignore backtraces if failed to add a sample */
54198 if (state == sb_bt_start) {
54199 state = sb_bt_ignore;
54200- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
54201+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
54202 }
54203 }
54204 release_mm(mm);
54205diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
54206index 5df60a6..72f5c1c 100644
54207--- a/drivers/oprofile/event_buffer.c
54208+++ b/drivers/oprofile/event_buffer.c
54209@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
54210 }
54211
54212 if (buffer_pos == buffer_size) {
54213- atomic_inc(&oprofile_stats.event_lost_overflow);
54214+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
54215 return;
54216 }
54217
54218diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
54219index dc8a042..fe5f315 100644
54220--- a/drivers/oprofile/oprof.c
54221+++ b/drivers/oprofile/oprof.c
54222@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
54223 if (oprofile_ops.switch_events())
54224 return;
54225
54226- atomic_inc(&oprofile_stats.multiplex_counter);
54227+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
54228 start_switch_worker();
54229 }
54230
54231diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
54232index bbd7516..1f97f55 100644
54233--- a/drivers/oprofile/oprofile_files.c
54234+++ b/drivers/oprofile/oprofile_files.c
54235@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
54236
54237
54238 static ssize_t timeout_write(struct file *file, char const __user *buf,
54239+ size_t count, loff_t *offset) __size_overflow(3);
54240+static ssize_t timeout_write(struct file *file, char const __user *buf,
54241 size_t count, loff_t *offset)
54242 {
54243 unsigned long val;
54244@@ -71,6 +73,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
54245 }
54246
54247
54248+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54249 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54250 {
54251 unsigned long val;
54252@@ -119,12 +122,14 @@ static const struct file_operations cpu_type_fops = {
54253 };
54254
54255
54256+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54257 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
54258 {
54259 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
54260 }
54261
54262
54263+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54264 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54265 {
54266 unsigned long val;
54267diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
54268index 61689e8..387f7f8 100644
54269--- a/drivers/oprofile/oprofile_stats.c
54270+++ b/drivers/oprofile/oprofile_stats.c
54271@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
54272 cpu_buf->sample_invalid_eip = 0;
54273 }
54274
54275- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
54276- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
54277- atomic_set(&oprofile_stats.event_lost_overflow, 0);
54278- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
54279- atomic_set(&oprofile_stats.multiplex_counter, 0);
54280+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
54281+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
54282+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
54283+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
54284+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
54285 }
54286
54287
54288diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
54289index 0b54e46..a37c527 100644
54290--- a/drivers/oprofile/oprofile_stats.h
54291+++ b/drivers/oprofile/oprofile_stats.h
54292@@ -13,11 +13,11 @@
54293 #include <asm/atomic.h>
54294
54295 struct oprofile_stat_struct {
54296- atomic_t sample_lost_no_mm;
54297- atomic_t sample_lost_no_mapping;
54298- atomic_t bt_lost_no_mapping;
54299- atomic_t event_lost_overflow;
54300- atomic_t multiplex_counter;
54301+ atomic_unchecked_t sample_lost_no_mm;
54302+ atomic_unchecked_t sample_lost_no_mapping;
54303+ atomic_unchecked_t bt_lost_no_mapping;
54304+ atomic_unchecked_t event_lost_overflow;
54305+ atomic_unchecked_t multiplex_counter;
54306 };
54307
54308 extern struct oprofile_stat_struct oprofile_stats;
54309diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
54310index 2766a6d..4d533c7 100644
54311--- a/drivers/oprofile/oprofilefs.c
54312+++ b/drivers/oprofile/oprofilefs.c
54313@@ -89,6 +89,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
54314 }
54315
54316
54317+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54318 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54319 {
54320 unsigned long *value = file->private_data;
54321@@ -187,7 +188,7 @@ static const struct file_operations atomic_ro_fops = {
54322
54323
54324 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
54325- char const *name, atomic_t *val)
54326+ char const *name, atomic_unchecked_t *val)
54327 {
54328 struct dentry *d = __oprofilefs_create_file(sb, root, name,
54329 &atomic_ro_fops, 0444);
54330diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
54331index 13a64bc..ad62835 100644
54332--- a/drivers/parisc/pdc_stable.c
54333+++ b/drivers/parisc/pdc_stable.c
54334@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
54335 return ret;
54336 }
54337
54338-static struct sysfs_ops pdcspath_attr_ops = {
54339+static const struct sysfs_ops pdcspath_attr_ops = {
54340 .show = pdcspath_attr_show,
54341 .store = pdcspath_attr_store,
54342 };
54343diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
54344index 8eefe56..40751a7 100644
54345--- a/drivers/parport/procfs.c
54346+++ b/drivers/parport/procfs.c
54347@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
54348
54349 *ppos += len;
54350
54351- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
54352+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
54353 }
54354
54355 #ifdef CONFIG_PARPORT_1284
54356@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
54357
54358 *ppos += len;
54359
54360- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
54361+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
54362 }
54363 #endif /* IEEE1284.3 support. */
54364
54365diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
54366index 73e7d8e..c80f3d2 100644
54367--- a/drivers/pci/hotplug/acpiphp_glue.c
54368+++ b/drivers/pci/hotplug/acpiphp_glue.c
54369@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
54370 }
54371
54372
54373-static struct acpi_dock_ops acpiphp_dock_ops = {
54374+static const struct acpi_dock_ops acpiphp_dock_ops = {
54375 .handler = handle_hotplug_event_func,
54376 };
54377
54378diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
54379index 9fff878..ad0ad53 100644
54380--- a/drivers/pci/hotplug/cpci_hotplug.h
54381+++ b/drivers/pci/hotplug/cpci_hotplug.h
54382@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
54383 int (*hardware_test) (struct slot* slot, u32 value);
54384 u8 (*get_power) (struct slot* slot);
54385 int (*set_power) (struct slot* slot, int value);
54386-};
54387+} __no_const;
54388
54389 struct cpci_hp_controller {
54390 unsigned int irq;
54391diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
54392index 76ba8a1..20ca857 100644
54393--- a/drivers/pci/hotplug/cpqphp_nvram.c
54394+++ b/drivers/pci/hotplug/cpqphp_nvram.c
54395@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
54396
54397 void compaq_nvram_init (void __iomem *rom_start)
54398 {
54399+
54400+#ifndef CONFIG_PAX_KERNEXEC
54401 if (rom_start) {
54402 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
54403 }
54404+#endif
54405+
54406 dbg("int15 entry = %p\n", compaq_int15_entry_point);
54407
54408 /* initialize our int15 lock */
54409diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
54410index 6151389..0a894ef 100644
54411--- a/drivers/pci/hotplug/fakephp.c
54412+++ b/drivers/pci/hotplug/fakephp.c
54413@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
54414 }
54415
54416 static struct kobj_type legacy_ktype = {
54417- .sysfs_ops = &(struct sysfs_ops){
54418+ .sysfs_ops = &(const struct sysfs_ops){
54419 .store = legacy_store, .show = legacy_show
54420 },
54421 .release = &legacy_release,
54422diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
54423index 5b680df..fe05b7e 100644
54424--- a/drivers/pci/intel-iommu.c
54425+++ b/drivers/pci/intel-iommu.c
54426@@ -2643,7 +2643,7 @@ error:
54427 return 0;
54428 }
54429
54430-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
54431+dma_addr_t intel_map_page(struct device *dev, struct page *page,
54432 unsigned long offset, size_t size,
54433 enum dma_data_direction dir,
54434 struct dma_attrs *attrs)
54435@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
54436 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
54437 }
54438
54439-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54440+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54441 size_t size, enum dma_data_direction dir,
54442 struct dma_attrs *attrs)
54443 {
54444@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54445 }
54446 }
54447
54448-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54449+void *intel_alloc_coherent(struct device *hwdev, size_t size,
54450 dma_addr_t *dma_handle, gfp_t flags)
54451 {
54452 void *vaddr;
54453@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54454 return NULL;
54455 }
54456
54457-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54458+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54459 dma_addr_t dma_handle)
54460 {
54461 int order;
54462@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54463 free_pages((unsigned long)vaddr, order);
54464 }
54465
54466-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54467+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54468 int nelems, enum dma_data_direction dir,
54469 struct dma_attrs *attrs)
54470 {
54471@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
54472 return nelems;
54473 }
54474
54475-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54476+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54477 enum dma_data_direction dir, struct dma_attrs *attrs)
54478 {
54479 int i;
54480@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
54481 return nelems;
54482 }
54483
54484-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54485+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54486 {
54487 return !dma_addr;
54488 }
54489
54490-struct dma_map_ops intel_dma_ops = {
54491+const struct dma_map_ops intel_dma_ops = {
54492 .alloc_coherent = intel_alloc_coherent,
54493 .free_coherent = intel_free_coherent,
54494 .map_sg = intel_map_sg,
54495diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
54496index 5b7056c..607bc94 100644
54497--- a/drivers/pci/pcie/aspm.c
54498+++ b/drivers/pci/pcie/aspm.c
54499@@ -27,9 +27,9 @@
54500 #define MODULE_PARAM_PREFIX "pcie_aspm."
54501
54502 /* Note: those are not register definitions */
54503-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
54504-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
54505-#define ASPM_STATE_L1 (4) /* L1 state */
54506+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
54507+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
54508+#define ASPM_STATE_L1 (4U) /* L1 state */
54509 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
54510 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
54511
54512diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
54513index 8105e32..ca10419 100644
54514--- a/drivers/pci/probe.c
54515+++ b/drivers/pci/probe.c
54516@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
54517 return ret;
54518 }
54519
54520-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
54521+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
54522 struct device_attribute *attr,
54523 char *buf)
54524 {
54525 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
54526 }
54527
54528-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
54529+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
54530 struct device_attribute *attr,
54531 char *buf)
54532 {
54533diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
54534index a03ad8c..024b0da 100644
54535--- a/drivers/pci/proc.c
54536+++ b/drivers/pci/proc.c
54537@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
54538 static int __init pci_proc_init(void)
54539 {
54540 struct pci_dev *dev = NULL;
54541+
54542+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54543+#ifdef CONFIG_GRKERNSEC_PROC_USER
54544+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
54545+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54546+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54547+#endif
54548+#else
54549 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
54550+#endif
54551 proc_create("devices", 0, proc_bus_pci_dir,
54552 &proc_bus_pci_dev_operations);
54553 proc_initialized = 1;
54554diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
54555index 8c02b6c..5584d8e 100644
54556--- a/drivers/pci/slot.c
54557+++ b/drivers/pci/slot.c
54558@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
54559 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
54560 }
54561
54562-static struct sysfs_ops pci_slot_sysfs_ops = {
54563+static const struct sysfs_ops pci_slot_sysfs_ops = {
54564 .show = pci_slot_attr_show,
54565 .store = pci_slot_attr_store,
54566 };
54567diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
54568index 30cf71d2..50938f1 100644
54569--- a/drivers/pcmcia/pcmcia_ioctl.c
54570+++ b/drivers/pcmcia/pcmcia_ioctl.c
54571@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
54572 return -EFAULT;
54573 }
54574 }
54575- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54576+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54577 if (!buf)
54578 return -ENOMEM;
54579
54580diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
54581index 52183c4..b224c69 100644
54582--- a/drivers/platform/x86/acer-wmi.c
54583+++ b/drivers/platform/x86/acer-wmi.c
54584@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
54585 return 0;
54586 }
54587
54588-static struct backlight_ops acer_bl_ops = {
54589+static const struct backlight_ops acer_bl_ops = {
54590 .get_brightness = read_brightness,
54591 .update_status = update_bl_status,
54592 };
54593diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
54594index 767cb61..a87380b 100644
54595--- a/drivers/platform/x86/asus-laptop.c
54596+++ b/drivers/platform/x86/asus-laptop.c
54597@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
54598 */
54599 static int read_brightness(struct backlight_device *bd);
54600 static int update_bl_status(struct backlight_device *bd);
54601-static struct backlight_ops asusbl_ops = {
54602+static const struct backlight_ops asusbl_ops = {
54603 .get_brightness = read_brightness,
54604 .update_status = update_bl_status,
54605 };
54606diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
54607index d66c07a..a4abaac 100644
54608--- a/drivers/platform/x86/asus_acpi.c
54609+++ b/drivers/platform/x86/asus_acpi.c
54610@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
54611 return 0;
54612 }
54613
54614-static struct backlight_ops asus_backlight_data = {
54615+static const struct backlight_ops asus_backlight_data = {
54616 .get_brightness = read_brightness,
54617 .update_status = set_brightness_status,
54618 };
54619diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
54620index 11003bb..550ff1b 100644
54621--- a/drivers/platform/x86/compal-laptop.c
54622+++ b/drivers/platform/x86/compal-laptop.c
54623@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
54624 return set_lcd_level(b->props.brightness);
54625 }
54626
54627-static struct backlight_ops compalbl_ops = {
54628+static const struct backlight_ops compalbl_ops = {
54629 .get_brightness = bl_get_brightness,
54630 .update_status = bl_update_status,
54631 };
54632diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
54633index 07a74da..9dc99fa 100644
54634--- a/drivers/platform/x86/dell-laptop.c
54635+++ b/drivers/platform/x86/dell-laptop.c
54636@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
54637 return buffer.output[1];
54638 }
54639
54640-static struct backlight_ops dell_ops = {
54641+static const struct backlight_ops dell_ops = {
54642 .get_brightness = dell_get_intensity,
54643 .update_status = dell_send_intensity,
54644 };
54645diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
54646index c533b1c..5c81f22 100644
54647--- a/drivers/platform/x86/eeepc-laptop.c
54648+++ b/drivers/platform/x86/eeepc-laptop.c
54649@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
54650 */
54651 static int read_brightness(struct backlight_device *bd);
54652 static int update_bl_status(struct backlight_device *bd);
54653-static struct backlight_ops eeepcbl_ops = {
54654+static const struct backlight_ops eeepcbl_ops = {
54655 .get_brightness = read_brightness,
54656 .update_status = update_bl_status,
54657 };
54658diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
54659index bcd4ba8..a249b35 100644
54660--- a/drivers/platform/x86/fujitsu-laptop.c
54661+++ b/drivers/platform/x86/fujitsu-laptop.c
54662@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
54663 return ret;
54664 }
54665
54666-static struct backlight_ops fujitsubl_ops = {
54667+static const struct backlight_ops fujitsubl_ops = {
54668 .get_brightness = bl_get_brightness,
54669 .update_status = bl_update_status,
54670 };
54671diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
54672index 759763d..1093ba2 100644
54673--- a/drivers/platform/x86/msi-laptop.c
54674+++ b/drivers/platform/x86/msi-laptop.c
54675@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
54676 return set_lcd_level(b->props.brightness);
54677 }
54678
54679-static struct backlight_ops msibl_ops = {
54680+static const struct backlight_ops msibl_ops = {
54681 .get_brightness = bl_get_brightness,
54682 .update_status = bl_update_status,
54683 };
54684diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
54685index fe7cf01..9012d8d 100644
54686--- a/drivers/platform/x86/panasonic-laptop.c
54687+++ b/drivers/platform/x86/panasonic-laptop.c
54688@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
54689 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
54690 }
54691
54692-static struct backlight_ops pcc_backlight_ops = {
54693+static const struct backlight_ops pcc_backlight_ops = {
54694 .get_brightness = bl_get,
54695 .update_status = bl_set_status,
54696 };
54697diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
54698index a2a742c..b37e25e 100644
54699--- a/drivers/platform/x86/sony-laptop.c
54700+++ b/drivers/platform/x86/sony-laptop.c
54701@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
54702 }
54703
54704 static struct backlight_device *sony_backlight_device;
54705-static struct backlight_ops sony_backlight_ops = {
54706+static const struct backlight_ops sony_backlight_ops = {
54707 .update_status = sony_backlight_update_status,
54708 .get_brightness = sony_backlight_get_brightness,
54709 };
54710diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
54711index 68271ae..5e8fb10 100644
54712--- a/drivers/platform/x86/thinkpad_acpi.c
54713+++ b/drivers/platform/x86/thinkpad_acpi.c
54714@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
54715 return 0;
54716 }
54717
54718-void static hotkey_mask_warn_incomplete_mask(void)
54719+static void hotkey_mask_warn_incomplete_mask(void)
54720 {
54721 /* log only what the user can fix... */
54722 const u32 wantedmask = hotkey_driver_mask &
54723@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
54724 BACKLIGHT_UPDATE_HOTKEY);
54725 }
54726
54727-static struct backlight_ops ibm_backlight_data = {
54728+static const struct backlight_ops ibm_backlight_data = {
54729 .get_brightness = brightness_get,
54730 .update_status = brightness_update_status,
54731 };
54732diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
54733index 51c0a8b..0786629 100644
54734--- a/drivers/platform/x86/toshiba_acpi.c
54735+++ b/drivers/platform/x86/toshiba_acpi.c
54736@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
54737 return AE_OK;
54738 }
54739
54740-static struct backlight_ops toshiba_backlight_data = {
54741+static const struct backlight_ops toshiba_backlight_data = {
54742 .get_brightness = get_lcd,
54743 .update_status = set_lcd_status,
54744 };
54745diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
54746index fc83783c..cf370d7 100644
54747--- a/drivers/pnp/pnpbios/bioscalls.c
54748+++ b/drivers/pnp/pnpbios/bioscalls.c
54749@@ -60,7 +60,7 @@ do { \
54750 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
54751 } while(0)
54752
54753-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
54754+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
54755 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
54756
54757 /*
54758@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54759
54760 cpu = get_cpu();
54761 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
54762+
54763+ pax_open_kernel();
54764 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
54765+ pax_close_kernel();
54766
54767 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
54768 spin_lock_irqsave(&pnp_bios_lock, flags);
54769@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54770 :"memory");
54771 spin_unlock_irqrestore(&pnp_bios_lock, flags);
54772
54773+ pax_open_kernel();
54774 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
54775+ pax_close_kernel();
54776+
54777 put_cpu();
54778
54779 /* If we get here and this is set then the PnP BIOS faulted on us. */
54780@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
54781 return status;
54782 }
54783
54784-void pnpbios_calls_init(union pnp_bios_install_struct *header)
54785+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
54786 {
54787 int i;
54788
54789@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54790 pnp_bios_callpoint.offset = header->fields.pm16offset;
54791 pnp_bios_callpoint.segment = PNP_CS16;
54792
54793+ pax_open_kernel();
54794+
54795 for_each_possible_cpu(i) {
54796 struct desc_struct *gdt = get_cpu_gdt_table(i);
54797 if (!gdt)
54798@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54799 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
54800 (unsigned long)__va(header->fields.pm16dseg));
54801 }
54802+
54803+ pax_close_kernel();
54804 }
54805diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
54806index ba97654..66b99d4 100644
54807--- a/drivers/pnp/resource.c
54808+++ b/drivers/pnp/resource.c
54809@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
54810 return 1;
54811
54812 /* check if the resource is valid */
54813- if (*irq < 0 || *irq > 15)
54814+ if (*irq > 15)
54815 return 0;
54816
54817 /* check if the resource is reserved */
54818@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
54819 return 1;
54820
54821 /* check if the resource is valid */
54822- if (*dma < 0 || *dma == 4 || *dma > 7)
54823+ if (*dma == 4 || *dma > 7)
54824 return 0;
54825
54826 /* check if the resource is reserved */
54827diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
54828index 62bb981..24a2dc9 100644
54829--- a/drivers/power/bq27x00_battery.c
54830+++ b/drivers/power/bq27x00_battery.c
54831@@ -44,7 +44,7 @@ struct bq27x00_device_info;
54832 struct bq27x00_access_methods {
54833 int (*read)(u8 reg, int *rt_value, int b_single,
54834 struct bq27x00_device_info *di);
54835-};
54836+} __no_const;
54837
54838 struct bq27x00_device_info {
54839 struct device *dev;
54840diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
54841index 62227cd..b5b538b 100644
54842--- a/drivers/rtc/rtc-dev.c
54843+++ b/drivers/rtc/rtc-dev.c
54844@@ -14,6 +14,7 @@
54845 #include <linux/module.h>
54846 #include <linux/rtc.h>
54847 #include <linux/sched.h>
54848+#include <linux/grsecurity.h>
54849 #include "rtc-core.h"
54850
54851 static dev_t rtc_devt;
54852@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
54853 if (copy_from_user(&tm, uarg, sizeof(tm)))
54854 return -EFAULT;
54855
54856+ gr_log_timechange();
54857+
54858 return rtc_set_time(rtc, &tm);
54859
54860 case RTC_PIE_ON:
54861diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
54862index 968e3c7..fbc637a 100644
54863--- a/drivers/s390/cio/qdio_perf.c
54864+++ b/drivers/s390/cio/qdio_perf.c
54865@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
54866 static int qdio_perf_proc_show(struct seq_file *m, void *v)
54867 {
54868 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
54869- (long)atomic_long_read(&perf_stats.qdio_int));
54870+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
54871 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
54872- (long)atomic_long_read(&perf_stats.pci_int));
54873+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
54874 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
54875- (long)atomic_long_read(&perf_stats.thin_int));
54876+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
54877 seq_printf(m, "\n");
54878 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
54879- (long)atomic_long_read(&perf_stats.tasklet_inbound));
54880+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
54881 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
54882- (long)atomic_long_read(&perf_stats.tasklet_outbound));
54883+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
54884 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
54885- (long)atomic_long_read(&perf_stats.tasklet_thinint),
54886- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
54887+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
54888+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
54889 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
54890- (long)atomic_long_read(&perf_stats.thinint_inbound),
54891- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
54892+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
54893+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
54894 seq_printf(m, "\n");
54895 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
54896- (long)atomic_long_read(&perf_stats.siga_in));
54897+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
54898 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54899- (long)atomic_long_read(&perf_stats.siga_out));
54900+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
54901 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
54902- (long)atomic_long_read(&perf_stats.siga_sync));
54903+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
54904 seq_printf(m, "\n");
54905 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
54906- (long)atomic_long_read(&perf_stats.inbound_handler));
54907+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
54908 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
54909- (long)atomic_long_read(&perf_stats.outbound_handler));
54910+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
54911 seq_printf(m, "\n");
54912 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
54913- (long)atomic_long_read(&perf_stats.fast_requeue));
54914+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
54915 seq_printf(m, "Number of outbound target full condition\t: %li\n",
54916- (long)atomic_long_read(&perf_stats.outbound_target_full));
54917+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
54918 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
54919- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
54920+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
54921 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
54922- (long)atomic_long_read(&perf_stats.debug_stop_polling));
54923+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
54924 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
54925- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
54926+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
54927 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
54928- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
54929- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
54930+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
54931+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
54932 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
54933- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
54934- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
54935+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
54936+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
54937 seq_printf(m, "\n");
54938 return 0;
54939 }
54940diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
54941index ff4504c..b3604c3 100644
54942--- a/drivers/s390/cio/qdio_perf.h
54943+++ b/drivers/s390/cio/qdio_perf.h
54944@@ -13,46 +13,46 @@
54945
54946 struct qdio_perf_stats {
54947 /* interrupt handler calls */
54948- atomic_long_t qdio_int;
54949- atomic_long_t pci_int;
54950- atomic_long_t thin_int;
54951+ atomic_long_unchecked_t qdio_int;
54952+ atomic_long_unchecked_t pci_int;
54953+ atomic_long_unchecked_t thin_int;
54954
54955 /* tasklet runs */
54956- atomic_long_t tasklet_inbound;
54957- atomic_long_t tasklet_outbound;
54958- atomic_long_t tasklet_thinint;
54959- atomic_long_t tasklet_thinint_loop;
54960- atomic_long_t thinint_inbound;
54961- atomic_long_t thinint_inbound_loop;
54962- atomic_long_t thinint_inbound_loop2;
54963+ atomic_long_unchecked_t tasklet_inbound;
54964+ atomic_long_unchecked_t tasklet_outbound;
54965+ atomic_long_unchecked_t tasklet_thinint;
54966+ atomic_long_unchecked_t tasklet_thinint_loop;
54967+ atomic_long_unchecked_t thinint_inbound;
54968+ atomic_long_unchecked_t thinint_inbound_loop;
54969+ atomic_long_unchecked_t thinint_inbound_loop2;
54970
54971 /* signal adapter calls */
54972- atomic_long_t siga_out;
54973- atomic_long_t siga_in;
54974- atomic_long_t siga_sync;
54975+ atomic_long_unchecked_t siga_out;
54976+ atomic_long_unchecked_t siga_in;
54977+ atomic_long_unchecked_t siga_sync;
54978
54979 /* misc */
54980- atomic_long_t inbound_handler;
54981- atomic_long_t outbound_handler;
54982- atomic_long_t fast_requeue;
54983- atomic_long_t outbound_target_full;
54984+ atomic_long_unchecked_t inbound_handler;
54985+ atomic_long_unchecked_t outbound_handler;
54986+ atomic_long_unchecked_t fast_requeue;
54987+ atomic_long_unchecked_t outbound_target_full;
54988
54989 /* for debugging */
54990- atomic_long_t debug_tl_out_timer;
54991- atomic_long_t debug_stop_polling;
54992- atomic_long_t debug_eqbs_all;
54993- atomic_long_t debug_eqbs_incomplete;
54994- atomic_long_t debug_sqbs_all;
54995- atomic_long_t debug_sqbs_incomplete;
54996+ atomic_long_unchecked_t debug_tl_out_timer;
54997+ atomic_long_unchecked_t debug_stop_polling;
54998+ atomic_long_unchecked_t debug_eqbs_all;
54999+ atomic_long_unchecked_t debug_eqbs_incomplete;
55000+ atomic_long_unchecked_t debug_sqbs_all;
55001+ atomic_long_unchecked_t debug_sqbs_incomplete;
55002 };
55003
55004 extern struct qdio_perf_stats perf_stats;
55005 extern int qdio_performance_stats;
55006
55007-static inline void qdio_perf_stat_inc(atomic_long_t *count)
55008+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
55009 {
55010 if (qdio_performance_stats)
55011- atomic_long_inc(count);
55012+ atomic_long_inc_unchecked(count);
55013 }
55014
55015 int qdio_setup_perf_stats(void);
55016diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
55017new file mode 100644
55018index 0000000..7d18a18
55019--- /dev/null
55020+++ b/drivers/scsi/3w-sas.c
55021@@ -0,0 +1,1933 @@
55022+/*
55023+ 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
55024+
55025+ Written By: Adam Radford <linuxraid@lsi.com>
55026+
55027+ Copyright (C) 2009 LSI Corporation.
55028+
55029+ This program is free software; you can redistribute it and/or modify
55030+ it under the terms of the GNU General Public License as published by
55031+ the Free Software Foundation; version 2 of the License.
55032+
55033+ This program is distributed in the hope that it will be useful,
55034+ but WITHOUT ANY WARRANTY; without even the implied warranty of
55035+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55036+ GNU General Public License for more details.
55037+
55038+ NO WARRANTY
55039+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
55040+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
55041+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
55042+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
55043+ solely responsible for determining the appropriateness of using and
55044+ distributing the Program and assumes all risks associated with its
55045+ exercise of rights under this Agreement, including but not limited to
55046+ the risks and costs of program errors, damage to or loss of data,
55047+ programs or equipment, and unavailability or interruption of operations.
55048+
55049+ DISCLAIMER OF LIABILITY
55050+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
55051+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55052+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
55053+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
55054+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
55055+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
55056+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
55057+
55058+ You should have received a copy of the GNU General Public License
55059+ along with this program; if not, write to the Free Software
55060+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55061+
55062+ Controllers supported by this driver:
55063+
55064+ LSI 3ware 9750 6Gb/s SAS/SATA-RAID
55065+
55066+ Bugs/Comments/Suggestions should be mailed to:
55067+ linuxraid@lsi.com
55068+
55069+ For more information, goto:
55070+ http://www.lsi.com
55071+
55072+ History
55073+ -------
55074+ 3.26.00.000 - Initial driver release.
55075+*/
55076+
55077+#include <linux/module.h>
55078+#include <linux/reboot.h>
55079+#include <linux/spinlock.h>
55080+#include <linux/interrupt.h>
55081+#include <linux/moduleparam.h>
55082+#include <linux/errno.h>
55083+#include <linux/types.h>
55084+#include <linux/delay.h>
55085+#include <linux/pci.h>
55086+#include <linux/time.h>
55087+#include <linux/mutex.h>
55088+#include <linux/smp_lock.h>
55089+#include <asm/io.h>
55090+#include <asm/irq.h>
55091+#include <asm/uaccess.h>
55092+#include <scsi/scsi.h>
55093+#include <scsi/scsi_host.h>
55094+#include <scsi/scsi_tcq.h>
55095+#include <scsi/scsi_cmnd.h>
55096+#include "3w-sas.h"
55097+
55098+/* Globals */
55099+#define TW_DRIVER_VERSION "3.26.00.028-2.6.32RH"
55100+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
55101+static unsigned int twl_device_extension_count;
55102+static int twl_major = -1;
55103+extern struct timezone sys_tz;
55104+
55105+/* Module parameters */
55106+MODULE_AUTHOR ("LSI");
55107+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
55108+MODULE_LICENSE("GPL");
55109+MODULE_VERSION(TW_DRIVER_VERSION);
55110+
55111+static int use_msi = 0;
55112+module_param(use_msi, int, S_IRUGO);
55113+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
55114+
55115+/* Function prototypes */
55116+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
55117+
55118+/* Functions */
55119+
55120+/* This function returns AENs through sysfs */
55121+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
55122+ struct bin_attribute *bin_attr,
55123+ char *outbuf, loff_t offset, size_t count)
55124+{
55125+ struct device *dev = container_of(kobj, struct device, kobj);
55126+ struct Scsi_Host *shost = class_to_shost(dev);
55127+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55128+ unsigned long flags = 0;
55129+ ssize_t ret;
55130+
55131+ if (!capable(CAP_SYS_ADMIN))
55132+ return -EACCES;
55133+
55134+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55135+ ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
55136+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55137+
55138+ return ret;
55139+} /* End twl_sysfs_aen_read() */
55140+
55141+/* aen_read sysfs attribute initializer */
55142+static struct bin_attribute twl_sysfs_aen_read_attr = {
55143+ .attr = {
55144+ .name = "3ware_aen_read",
55145+ .mode = S_IRUSR,
55146+ },
55147+ .size = 0,
55148+ .read = twl_sysfs_aen_read
55149+};
55150+
55151+/* This function returns driver compatibility info through sysfs */
55152+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
55153+ struct bin_attribute *bin_attr,
55154+ char *outbuf, loff_t offset, size_t count)
55155+{
55156+ struct device *dev = container_of(kobj, struct device, kobj);
55157+ struct Scsi_Host *shost = class_to_shost(dev);
55158+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55159+ unsigned long flags = 0;
55160+ ssize_t ret;
55161+
55162+ if (!capable(CAP_SYS_ADMIN))
55163+ return -EACCES;
55164+
55165+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55166+ ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
55167+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55168+
55169+ return ret;
55170+} /* End twl_sysfs_compat_info() */
55171+
55172+/* compat_info sysfs attribute initializer */
55173+static struct bin_attribute twl_sysfs_compat_info_attr = {
55174+ .attr = {
55175+ .name = "3ware_compat_info",
55176+ .mode = S_IRUSR,
55177+ },
55178+ .size = 0,
55179+ .read = twl_sysfs_compat_info
55180+};
55181+
55182+/* Show some statistics about the card */
55183+static ssize_t twl_show_stats(struct device *dev,
55184+ struct device_attribute *attr, char *buf)
55185+{
55186+ struct Scsi_Host *host = class_to_shost(dev);
55187+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
55188+ unsigned long flags = 0;
55189+ ssize_t len;
55190+
55191+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55192+ len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
55193+ "Current commands posted: %4d\n"
55194+ "Max commands posted: %4d\n"
55195+ "Last sgl length: %4d\n"
55196+ "Max sgl length: %4d\n"
55197+ "Last sector count: %4d\n"
55198+ "Max sector count: %4d\n"
55199+ "SCSI Host Resets: %4d\n"
55200+ "AEN's: %4d\n",
55201+ TW_DRIVER_VERSION,
55202+ tw_dev->posted_request_count,
55203+ tw_dev->max_posted_request_count,
55204+ tw_dev->sgl_entries,
55205+ tw_dev->max_sgl_entries,
55206+ tw_dev->sector_count,
55207+ tw_dev->max_sector_count,
55208+ tw_dev->num_resets,
55209+ tw_dev->aen_count);
55210+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55211+ return len;
55212+} /* End twl_show_stats() */
55213+
55214+/* This function will set a devices queue depth */
55215+static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
55216+ int reason)
55217+{
55218+ if (reason != SCSI_QDEPTH_DEFAULT)
55219+ return -EOPNOTSUPP;
55220+
55221+ if (queue_depth > TW_Q_LENGTH-2)
55222+ queue_depth = TW_Q_LENGTH-2;
55223+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
55224+ return queue_depth;
55225+} /* End twl_change_queue_depth() */
55226+
55227+/* stats sysfs attribute initializer */
55228+static struct device_attribute twl_host_stats_attr = {
55229+ .attr = {
55230+ .name = "3ware_stats",
55231+ .mode = S_IRUGO,
55232+ },
55233+ .show = twl_show_stats
55234+};
55235+
55236+/* Host attributes initializer */
55237+static struct device_attribute *twl_host_attrs[] = {
55238+ &twl_host_stats_attr,
55239+ NULL,
55240+};
55241+
55242+/* This function will look up an AEN severity string */
55243+static char *twl_aen_severity_lookup(unsigned char severity_code)
55244+{
55245+ char *retval = NULL;
55246+
55247+ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
55248+ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
55249+ goto out;
55250+
55251+ retval = twl_aen_severity_table[severity_code];
55252+out:
55253+ return retval;
55254+} /* End twl_aen_severity_lookup() */
55255+
55256+/* This function will queue an event */
55257+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
55258+{
55259+ u32 local_time;
55260+ struct timeval time;
55261+ TW_Event *event;
55262+ unsigned short aen;
55263+ char host[16];
55264+ char *error_str;
55265+
55266+ tw_dev->aen_count++;
55267+
55268+ /* Fill out event info */
55269+ event = tw_dev->event_queue[tw_dev->error_index];
55270+
55271+ host[0] = '\0';
55272+ if (tw_dev->host)
55273+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
55274+
55275+ aen = le16_to_cpu(header->status_block.error);
55276+ memset(event, 0, sizeof(TW_Event));
55277+
55278+ event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
55279+ do_gettimeofday(&time);
55280+ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
55281+ event->time_stamp_sec = local_time;
55282+ event->aen_code = aen;
55283+ event->retrieved = TW_AEN_NOT_RETRIEVED;
55284+ event->sequence_id = tw_dev->error_sequence_id;
55285+ tw_dev->error_sequence_id++;
55286+
55287+ /* Check for embedded error string */
55288+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
55289+
55290+ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
55291+ event->parameter_len = strlen(header->err_specific_desc);
55292+ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
55293+ if (event->severity != TW_AEN_SEVERITY_DEBUG)
55294+ printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
55295+ host,
55296+ twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
55297+ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
55298+ header->err_specific_desc);
55299+ else
55300+ tw_dev->aen_count--;
55301+
55302+ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
55303+} /* End twl_aen_queue_event() */
55304+
55305+/* This function will attempt to post a command packet to the board */
55306+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
55307+{
55308+ dma_addr_t command_que_value;
55309+
55310+ command_que_value = tw_dev->command_packet_phys[request_id];
55311+ command_que_value += TW_COMMAND_OFFSET;
55312+
55313+ /* First write upper 4 bytes */
55314+ writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
55315+ /* Then the lower 4 bytes */
55316+ writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
55317+
55318+ tw_dev->state[request_id] = TW_S_POSTED;
55319+ tw_dev->posted_request_count++;
55320+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
55321+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
55322+
55323+ return 0;
55324+} /* End twl_post_command_packet() */
55325+
55326+/* This function will perform a pci-dma mapping for a scatter gather list */
55327+static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
55328+{
55329+ int use_sg;
55330+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
55331+
55332+ use_sg = scsi_dma_map(cmd);
55333+ if (!use_sg)
55334+ return 0;
55335+ else if (use_sg < 0) {
55336+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
55337+ return 0;
55338+ }
55339+
55340+ cmd->SCp.phase = TW_PHASE_SGLIST;
55341+ cmd->SCp.have_data_in = use_sg;
55342+
55343+ return use_sg;
55344+} /* End twl_map_scsi_sg_data() */
55345+
55346+/* This function hands scsi cdb's to the firmware */
55347+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
55348+{
55349+ TW_Command_Full *full_command_packet;
55350+ TW_Command_Apache *command_packet;
55351+ int i, sg_count;
55352+ struct scsi_cmnd *srb = NULL;
55353+ struct scatterlist *sglist = NULL, *sg;
55354+ int retval = 1;
55355+
55356+ if (tw_dev->srb[request_id]) {
55357+ srb = tw_dev->srb[request_id];
55358+ if (scsi_sglist(srb))
55359+ sglist = scsi_sglist(srb);
55360+ }
55361+
55362+ /* Initialize command packet */
55363+ full_command_packet = tw_dev->command_packet_virt[request_id];
55364+ full_command_packet->header.header_desc.size_header = 128;
55365+ full_command_packet->header.status_block.error = 0;
55366+ full_command_packet->header.status_block.severity__reserved = 0;
55367+
55368+ command_packet = &full_command_packet->command.newcommand;
55369+ command_packet->status = 0;
55370+ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
55371+
55372+ /* We forced 16 byte cdb use earlier */
55373+ if (!cdb)
55374+ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
55375+ else
55376+ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
55377+
55378+ if (srb) {
55379+ command_packet->unit = srb->device->id;
55380+ command_packet->request_id__lunl =
55381+ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
55382+ } else {
55383+ command_packet->request_id__lunl =
55384+ cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
55385+ command_packet->unit = 0;
55386+ }
55387+
55388+ command_packet->sgl_offset = 16;
55389+
55390+ if (!sglistarg) {
55391+ /* Map sglist from scsi layer to cmd packet */
55392+ if (scsi_sg_count(srb)) {
55393+ sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
55394+ if (sg_count == 0)
55395+ goto out;
55396+
55397+ scsi_for_each_sg(srb, sg, sg_count, i) {
55398+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
55399+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
55400+ }
55401+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
55402+ }
55403+ } else {
55404+ /* Internal cdb post */
55405+ for (i = 0; i < use_sg; i++) {
55406+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
55407+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
55408+ }
55409+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
55410+ }
55411+
55412+ /* Update some stats */
55413+ if (srb) {
55414+ tw_dev->sector_count = scsi_bufflen(srb) / 512;
55415+ if (tw_dev->sector_count > tw_dev->max_sector_count)
55416+ tw_dev->max_sector_count = tw_dev->sector_count;
55417+ tw_dev->sgl_entries = scsi_sg_count(srb);
55418+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
55419+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
55420+ }
55421+
55422+ /* Now post the command to the board */
55423+ retval = twl_post_command_packet(tw_dev, request_id);
55424+
55425+out:
55426+ return retval;
55427+} /* End twl_scsiop_execute_scsi() */
55428+
55429+/* This function will read the aen queue from the isr */
55430+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
55431+{
55432+ char cdb[TW_MAX_CDB_LEN];
55433+ TW_SG_Entry_ISO sglist[1];
55434+ TW_Command_Full *full_command_packet;
55435+ int retval = 1;
55436+
55437+ full_command_packet = tw_dev->command_packet_virt[request_id];
55438+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55439+
55440+ /* Initialize cdb */
55441+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55442+ cdb[0] = REQUEST_SENSE; /* opcode */
55443+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55444+
55445+ /* Initialize sglist */
55446+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55447+ sglist[0].length = TW_SECTOR_SIZE;
55448+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55449+
55450+ /* Mark internal command */
55451+ tw_dev->srb[request_id] = NULL;
55452+
55453+ /* Now post the command packet */
55454+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55455+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
55456+ goto out;
55457+ }
55458+ retval = 0;
55459+out:
55460+ return retval;
55461+} /* End twl_aen_read_queue() */
55462+
55463+/* This function will sync firmware time with the host time */
55464+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
55465+{
55466+ u32 schedulertime;
55467+ struct timeval utc;
55468+ TW_Command_Full *full_command_packet;
55469+ TW_Command *command_packet;
55470+ TW_Param_Apache *param;
55471+ u32 local_time;
55472+
55473+ /* Fill out the command packet */
55474+ full_command_packet = tw_dev->command_packet_virt[request_id];
55475+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55476+ command_packet = &full_command_packet->command.oldcommand;
55477+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
55478+ command_packet->request_id = request_id;
55479+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
55480+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
55481+ command_packet->size = TW_COMMAND_SIZE;
55482+ command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
55483+
55484+ /* Setup the param */
55485+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55486+ memset(param, 0, TW_SECTOR_SIZE);
55487+ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
55488+ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
55489+ param->parameter_size_bytes = cpu_to_le16(4);
55490+
55491+ /* Convert system time in UTC to local time seconds since last
55492+ Sunday 12:00AM */
55493+ do_gettimeofday(&utc);
55494+ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
55495+ schedulertime = local_time - (3 * 86400);
55496+ schedulertime = cpu_to_le32(schedulertime % 604800);
55497+
55498+ memcpy(param->data, &schedulertime, sizeof(u32));
55499+
55500+ /* Mark internal command */
55501+ tw_dev->srb[request_id] = NULL;
55502+
55503+ /* Now post the command */
55504+ twl_post_command_packet(tw_dev, request_id);
55505+} /* End twl_aen_sync_time() */
55506+
55507+/* This function will assign an available request id */
55508+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
55509+{
55510+ *request_id = tw_dev->free_queue[tw_dev->free_head];
55511+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
55512+ tw_dev->state[*request_id] = TW_S_STARTED;
55513+} /* End twl_get_request_id() */
55514+
55515+/* This function will free a request id */
55516+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
55517+{
55518+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
55519+ tw_dev->state[request_id] = TW_S_FINISHED;
55520+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
55521+} /* End twl_free_request_id() */
55522+
55523+/* This function will complete an aen request from the isr */
55524+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
55525+{
55526+ TW_Command_Full *full_command_packet;
55527+ TW_Command *command_packet;
55528+ TW_Command_Apache_Header *header;
55529+ unsigned short aen;
55530+ int retval = 1;
55531+
55532+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55533+ tw_dev->posted_request_count--;
55534+ aen = le16_to_cpu(header->status_block.error);
55535+ full_command_packet = tw_dev->command_packet_virt[request_id];
55536+ command_packet = &full_command_packet->command.oldcommand;
55537+
55538+ /* First check for internal completion of set param for time sync */
55539+ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
55540+ /* Keep reading the queue in case there are more aen's */
55541+ if (twl_aen_read_queue(tw_dev, request_id))
55542+ goto out2;
55543+ else {
55544+ retval = 0;
55545+ goto out;
55546+ }
55547+ }
55548+
55549+ switch (aen) {
55550+ case TW_AEN_QUEUE_EMPTY:
55551+ /* Quit reading the queue if this is the last one */
55552+ break;
55553+ case TW_AEN_SYNC_TIME_WITH_HOST:
55554+ twl_aen_sync_time(tw_dev, request_id);
55555+ retval = 0;
55556+ goto out;
55557+ default:
55558+ twl_aen_queue_event(tw_dev, header);
55559+
55560+ /* If there are more aen's, keep reading the queue */
55561+ if (twl_aen_read_queue(tw_dev, request_id))
55562+ goto out2;
55563+ else {
55564+ retval = 0;
55565+ goto out;
55566+ }
55567+ }
55568+ retval = 0;
55569+out2:
55570+ tw_dev->state[request_id] = TW_S_COMPLETED;
55571+ twl_free_request_id(tw_dev, request_id);
55572+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
55573+out:
55574+ return retval;
55575+} /* End twl_aen_complete() */
55576+
55577+/* This function will poll for a response */
55578+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
55579+{
55580+ unsigned long before;
55581+ dma_addr_t mfa;
55582+ u32 regh, regl;
55583+ u32 response;
55584+ int retval = 1;
55585+ int found = 0;
55586+
55587+ before = jiffies;
55588+
55589+ while (!found) {
55590+ if (sizeof(dma_addr_t) > 4) {
55591+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
55592+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55593+ mfa = ((u64)regh << 32) | regl;
55594+ } else
55595+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55596+
55597+ response = (u32)mfa;
55598+
55599+ if (TW_RESID_OUT(response) == request_id)
55600+ found = 1;
55601+
55602+ if (time_after(jiffies, before + HZ * seconds))
55603+ goto out;
55604+
55605+ msleep(50);
55606+ }
55607+ retval = 0;
55608+out:
55609+ return retval;
55610+} /* End twl_poll_response() */
55611+
55612+/* This function will drain the aen queue */
55613+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
55614+{
55615+ int request_id = 0;
55616+ char cdb[TW_MAX_CDB_LEN];
55617+ TW_SG_Entry_ISO sglist[1];
55618+ int finished = 0, count = 0;
55619+ TW_Command_Full *full_command_packet;
55620+ TW_Command_Apache_Header *header;
55621+ unsigned short aen;
55622+ int first_reset = 0, queue = 0, retval = 1;
55623+
55624+ if (no_check_reset)
55625+ first_reset = 0;
55626+ else
55627+ first_reset = 1;
55628+
55629+ full_command_packet = tw_dev->command_packet_virt[request_id];
55630+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55631+
55632+ /* Initialize cdb */
55633+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55634+ cdb[0] = REQUEST_SENSE; /* opcode */
55635+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55636+
55637+ /* Initialize sglist */
55638+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55639+ sglist[0].length = TW_SECTOR_SIZE;
55640+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55641+
55642+ /* Mark internal command */
55643+ tw_dev->srb[request_id] = NULL;
55644+
55645+ do {
55646+ /* Send command to the board */
55647+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55648+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
55649+ goto out;
55650+ }
55651+
55652+ /* Now poll for completion */
55653+ if (twl_poll_response(tw_dev, request_id, 30)) {
55654+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
55655+ tw_dev->posted_request_count--;
55656+ goto out;
55657+ }
55658+
55659+ tw_dev->posted_request_count--;
55660+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55661+ aen = le16_to_cpu(header->status_block.error);
55662+ queue = 0;
55663+ count++;
55664+
55665+ switch (aen) {
55666+ case TW_AEN_QUEUE_EMPTY:
55667+ if (first_reset != 1)
55668+ goto out;
55669+ else
55670+ finished = 1;
55671+ break;
55672+ case TW_AEN_SOFT_RESET:
55673+ if (first_reset == 0)
55674+ first_reset = 1;
55675+ else
55676+ queue = 1;
55677+ break;
55678+ case TW_AEN_SYNC_TIME_WITH_HOST:
55679+ break;
55680+ default:
55681+ queue = 1;
55682+ }
55683+
55684+ /* Now queue an event info */
55685+ if (queue)
55686+ twl_aen_queue_event(tw_dev, header);
55687+ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
55688+
55689+ if (count == TW_MAX_AEN_DRAIN)
55690+ goto out;
55691+
55692+ retval = 0;
55693+out:
55694+ tw_dev->state[request_id] = TW_S_INITIAL;
55695+ return retval;
55696+} /* End twl_aen_drain_queue() */
55697+
55698+/* This function will allocate memory and check if it is correctly aligned */
55699+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
55700+{
55701+ int i;
55702+ dma_addr_t dma_handle;
55703+ unsigned long *cpu_addr;
55704+ int retval = 1;
55705+
55706+ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
55707+ if (!cpu_addr) {
55708+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
55709+ goto out;
55710+ }
55711+
55712+ memset(cpu_addr, 0, size*TW_Q_LENGTH);
55713+
55714+ for (i = 0; i < TW_Q_LENGTH; i++) {
55715+ switch(which) {
55716+ case 0:
55717+ tw_dev->command_packet_phys[i] = dma_handle+(i*size);
55718+ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
55719+ break;
55720+ case 1:
55721+ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
55722+ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
55723+ break;
55724+ case 2:
55725+ tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
55726+ tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
55727+ break;
55728+ }
55729+ }
55730+ retval = 0;
55731+out:
55732+ return retval;
55733+} /* End twl_allocate_memory() */
55734+
55735+/* This function will load the request id and various sgls for ioctls */
55736+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
55737+{
55738+ TW_Command *oldcommand;
55739+ TW_Command_Apache *newcommand;
55740+ TW_SG_Entry_ISO *sgl;
55741+ unsigned int pae = 0;
55742+
55743+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
55744+ pae = 1;
55745+
55746+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
55747+ newcommand = &full_command_packet->command.newcommand;
55748+ newcommand->request_id__lunl =
55749+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
55750+ if (length) {
55751+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55752+ newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
55753+ }
55754+ newcommand->sgl_entries__lunh =
55755+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
55756+ } else {
55757+ oldcommand = &full_command_packet->command.oldcommand;
55758+ oldcommand->request_id = request_id;
55759+
55760+ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
55761+ /* Load the sg list */
55762+ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
55763+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55764+ sgl->length = TW_CPU_TO_SGL(length);
55765+ oldcommand->size += pae;
55766+ oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
55767+ }
55768+ }
55769+} /* End twl_load_sgl() */
55770+
55771+/* This function handles ioctl for the character device
55772+ This interface is used by smartmontools open source software */
55773+static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
55774+{
55775+ long timeout;
55776+ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
55777+ dma_addr_t dma_handle;
55778+ int request_id = 0;
55779+ TW_Ioctl_Driver_Command driver_command;
55780+ TW_Ioctl_Buf_Apache *tw_ioctl;
55781+ TW_Command_Full *full_command_packet;
55782+ TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
55783+ int retval = -EFAULT;
55784+ void __user *argp = (void __user *)arg;
55785+
55786+ /* Only let one of these through at a time */
55787+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
55788+ retval = -EINTR;
55789+ goto out;
55790+ }
55791+
55792+ /* First copy down the driver command */
55793+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
55794+ goto out2;
55795+
55796+ /* Check data buffer size */
55797+ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
55798+ retval = -EINVAL;
55799+ goto out2;
55800+ }
55801+
55802+ /* Hardware can only do multiple of 512 byte transfers */
55803+ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
55804+
55805+ /* Now allocate ioctl buf memory */
55806+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
55807+ if (!cpu_addr) {
55808+ retval = -ENOMEM;
55809+ goto out2;
55810+ }
55811+
55812+ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
55813+
55814+ /* Now copy down the entire ioctl */
55815+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
55816+ goto out3;
55817+
55818+ /* See which ioctl we are doing */
55819+ switch (cmd) {
55820+ case TW_IOCTL_FIRMWARE_PASS_THROUGH:
55821+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55822+ twl_get_request_id(tw_dev, &request_id);
55823+
55824+ /* Flag internal command */
55825+ tw_dev->srb[request_id] = NULL;
55826+
55827+ /* Flag chrdev ioctl */
55828+ tw_dev->chrdev_request_id = request_id;
55829+
55830+ full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
55831+
55832+ /* Load request id and sglist for both command types */
55833+ twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
55834+
55835+ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
55836+
55837+ /* Now post the command packet to the controller */
55838+ twl_post_command_packet(tw_dev, request_id);
55839+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55840+
55841+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
55842+
55843+ /* Now wait for command to complete */
55844+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
55845+
55846+ /* We timed out, and didn't get an interrupt */
55847+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
55848+ /* Now we need to reset the board */
55849+ printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
55850+ tw_dev->host->host_no, TW_DRIVER, 0x6,
55851+ cmd);
55852+ retval = -EIO;
55853+ twl_reset_device_extension(tw_dev, 1);
55854+ goto out3;
55855+ }
55856+
55857+ /* Now copy in the command packet response */
55858+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
55859+
55860+ /* Now complete the io */
55861+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55862+ tw_dev->posted_request_count--;
55863+ tw_dev->state[request_id] = TW_S_COMPLETED;
55864+ twl_free_request_id(tw_dev, request_id);
55865+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55866+ break;
55867+ default:
55868+ retval = -ENOTTY;
55869+ goto out3;
55870+ }
55871+
55872+ /* Now copy the entire response to userspace */
55873+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
55874+ retval = 0;
55875+out3:
55876+ /* Now free ioctl buf memory */
55877+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
55878+out2:
55879+ mutex_unlock(&tw_dev->ioctl_lock);
55880+out:
55881+ return retval;
55882+} /* End twl_chrdev_ioctl() */
55883+
55884+/* This function handles open for the character device */
55885+static int twl_chrdev_open(struct inode *inode, struct file *file)
55886+{
55887+ unsigned int minor_number;
55888+ int retval = -ENODEV;
55889+
55890+ if (!capable(CAP_SYS_ADMIN)) {
55891+ retval = -EACCES;
55892+ goto out;
55893+ }
55894+
55895+ cycle_kernel_lock();
55896+ minor_number = iminor(inode);
55897+ if (minor_number >= twl_device_extension_count)
55898+ goto out;
55899+ retval = 0;
55900+out:
55901+ return retval;
55902+} /* End twl_chrdev_open() */
55903+
55904+/* File operations struct for character device */
55905+static const struct file_operations twl_fops = {
55906+ .owner = THIS_MODULE,
55907+ .ioctl = twl_chrdev_ioctl,
55908+ .open = twl_chrdev_open,
55909+ .release = NULL
55910+};
55911+
55912+/* This function passes sense data from firmware to scsi layer */
55913+static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
55914+{
55915+ TW_Command_Apache_Header *header;
55916+ TW_Command_Full *full_command_packet;
55917+ unsigned short error;
55918+ char *error_str;
55919+ int retval = 1;
55920+
55921+ header = tw_dev->sense_buffer_virt[i];
55922+ full_command_packet = tw_dev->command_packet_virt[request_id];
55923+
55924+ /* Get embedded firmware error string */
55925+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
55926+
55927+ /* Don't print error for Logical unit not supported during rollcall */
55928+ error = le16_to_cpu(header->status_block.error);
55929+ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
55930+ if (print_host)
55931+ printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55932+ tw_dev->host->host_no,
55933+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55934+ header->status_block.error,
55935+ error_str,
55936+ header->err_specific_desc);
55937+ else
55938+ printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55939+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55940+ header->status_block.error,
55941+ error_str,
55942+ header->err_specific_desc);
55943+ }
55944+
55945+ if (copy_sense) {
55946+ memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
55947+ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
55948+ goto out;
55949+ }
55950+out:
55951+ return retval;
55952+} /* End twl_fill_sense() */
55953+
55954+/* This function will free up device extension resources */
55955+static void twl_free_device_extension(TW_Device_Extension *tw_dev)
55956+{
55957+ if (tw_dev->command_packet_virt[0])
55958+ pci_free_consistent(tw_dev->tw_pci_dev,
55959+ sizeof(TW_Command_Full)*TW_Q_LENGTH,
55960+ tw_dev->command_packet_virt[0],
55961+ tw_dev->command_packet_phys[0]);
55962+
55963+ if (tw_dev->generic_buffer_virt[0])
55964+ pci_free_consistent(tw_dev->tw_pci_dev,
55965+ TW_SECTOR_SIZE*TW_Q_LENGTH,
55966+ tw_dev->generic_buffer_virt[0],
55967+ tw_dev->generic_buffer_phys[0]);
55968+
55969+ if (tw_dev->sense_buffer_virt[0])
55970+ pci_free_consistent(tw_dev->tw_pci_dev,
55971+ sizeof(TW_Command_Apache_Header)*
55972+ TW_Q_LENGTH,
55973+ tw_dev->sense_buffer_virt[0],
55974+ tw_dev->sense_buffer_phys[0]);
55975+
55976+ kfree(tw_dev->event_queue[0]);
55977+} /* End twl_free_device_extension() */
55978+
55979+/* This function will get parameter table entries from the firmware */
55980+static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
55981+{
55982+ TW_Command_Full *full_command_packet;
55983+ TW_Command *command_packet;
55984+ TW_Param_Apache *param;
55985+ void *retval = NULL;
55986+
55987+ /* Setup the command packet */
55988+ full_command_packet = tw_dev->command_packet_virt[request_id];
55989+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55990+ command_packet = &full_command_packet->command.oldcommand;
55991+
55992+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
55993+ command_packet->size = TW_COMMAND_SIZE;
55994+ command_packet->request_id = request_id;
55995+ command_packet->byte6_offset.block_count = cpu_to_le16(1);
55996+
55997+ /* Now setup the param */
55998+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55999+ memset(param, 0, TW_SECTOR_SIZE);
56000+ param->table_id = cpu_to_le16(table_id | 0x8000);
56001+ param->parameter_id = cpu_to_le16(parameter_id);
56002+ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
56003+
56004+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
56005+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
56006+
56007+ /* Post the command packet to the board */
56008+ twl_post_command_packet(tw_dev, request_id);
56009+
56010+ /* Poll for completion */
56011+ if (twl_poll_response(tw_dev, request_id, 30))
56012+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
56013+ else
56014+ retval = (void *)&(param->data[0]);
56015+
56016+ tw_dev->posted_request_count--;
56017+ tw_dev->state[request_id] = TW_S_INITIAL;
56018+
56019+ return retval;
56020+} /* End twl_get_param() */
56021+
56022+/* This function will send an initconnection command to controller */
56023+static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
56024+ u32 set_features, unsigned short current_fw_srl,
56025+ unsigned short current_fw_arch_id,
56026+ unsigned short current_fw_branch,
56027+ unsigned short current_fw_build,
56028+ unsigned short *fw_on_ctlr_srl,
56029+ unsigned short *fw_on_ctlr_arch_id,
56030+ unsigned short *fw_on_ctlr_branch,
56031+ unsigned short *fw_on_ctlr_build,
56032+ u32 *init_connect_result)
56033+{
56034+ TW_Command_Full *full_command_packet;
56035+ TW_Initconnect *tw_initconnect;
56036+ int request_id = 0, retval = 1;
56037+
56038+ /* Initialize InitConnection command packet */
56039+ full_command_packet = tw_dev->command_packet_virt[request_id];
56040+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
56041+ full_command_packet->header.header_desc.size_header = 128;
56042+
56043+ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
56044+ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
56045+ tw_initconnect->request_id = request_id;
56046+ tw_initconnect->message_credits = cpu_to_le16(message_credits);
56047+ tw_initconnect->features = set_features;
56048+
56049+ /* Turn on 64-bit sgl support if we need to */
56050+ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
56051+
56052+ tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
56053+
56054+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
56055+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
56056+ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
56057+ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
56058+ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
56059+ tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
56060+ } else
56061+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
56062+
56063+ /* Send command packet to the board */
56064+ twl_post_command_packet(tw_dev, request_id);
56065+
56066+ /* Poll for completion */
56067+ if (twl_poll_response(tw_dev, request_id, 30)) {
56068+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
56069+ } else {
56070+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
56071+ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
56072+ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
56073+ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
56074+ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
56075+ *init_connect_result = le32_to_cpu(tw_initconnect->result);
56076+ }
56077+ retval = 0;
56078+ }
56079+
56080+ tw_dev->posted_request_count--;
56081+ tw_dev->state[request_id] = TW_S_INITIAL;
56082+
56083+ return retval;
56084+} /* End twl_initconnection() */
56085+
56086+/* This function will initialize the fields of a device extension */
56087+static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
56088+{
56089+ int i, retval = 1;
56090+
56091+ /* Initialize command packet buffers */
56092+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
56093+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
56094+ goto out;
56095+ }
56096+
56097+ /* Initialize generic buffer */
56098+ if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
56099+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
56100+ goto out;
56101+ }
56102+
56103+ /* Allocate sense buffers */
56104+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
56105+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
56106+ goto out;
56107+ }
56108+
56109+ /* Allocate event info space */
56110+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
56111+ if (!tw_dev->event_queue[0]) {
56112+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
56113+ goto out;
56114+ }
56115+
56116+ for (i = 0; i < TW_Q_LENGTH; i++) {
56117+ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
56118+ tw_dev->free_queue[i] = i;
56119+ tw_dev->state[i] = TW_S_INITIAL;
56120+ }
56121+
56122+ tw_dev->free_head = TW_Q_START;
56123+ tw_dev->free_tail = TW_Q_START;
56124+ tw_dev->error_sequence_id = 1;
56125+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56126+
56127+ mutex_init(&tw_dev->ioctl_lock);
56128+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
56129+
56130+ retval = 0;
56131+out:
56132+ return retval;
56133+} /* End twl_initialize_device_extension() */
56134+
56135+/* This function will perform a pci-dma unmap */
56136+static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
56137+{
56138+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
56139+
56140+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
56141+ scsi_dma_unmap(cmd);
56142+} /* End twl_unmap_scsi_data() */
56143+
56144+/* This function will handle attention interrupts */
56145+static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
56146+{
56147+ int retval = 1;
56148+ u32 request_id, doorbell;
56149+
56150+ /* Read doorbell status */
56151+ doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
56152+
56153+ /* Check for controller errors */
56154+ if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
56155+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
56156+ goto out;
56157+ }
56158+
56159+ /* Check if we need to perform an AEN drain */
56160+ if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
56161+ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
56162+ twl_get_request_id(tw_dev, &request_id);
56163+ if (twl_aen_read_queue(tw_dev, request_id)) {
56164+ tw_dev->state[request_id] = TW_S_COMPLETED;
56165+ twl_free_request_id(tw_dev, request_id);
56166+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
56167+ }
56168+ }
56169+ }
56170+
56171+ retval = 0;
56172+out:
56173+ /* Clear doorbell interrupt */
56174+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56175+
56176+ /* Make sure the clear was flushed by reading it back */
56177+ readl(TWL_HOBDBC_REG_ADDR(tw_dev));
56178+
56179+ return retval;
56180+} /* End twl_handle_attention_interrupt() */
56181+
56182+/* Interrupt service routine */
56183+static irqreturn_t twl_interrupt(int irq, void *dev_instance)
56184+{
56185+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
56186+ int i, handled = 0, error = 0;
56187+ dma_addr_t mfa = 0;
56188+ u32 reg, regl, regh, response, request_id = 0;
56189+ struct scsi_cmnd *cmd;
56190+ TW_Command_Full *full_command_packet;
56191+
56192+ spin_lock(tw_dev->host->host_lock);
56193+
56194+ /* Read host interrupt status */
56195+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56196+
56197+ /* Check if this is our interrupt, otherwise bail */
56198+ if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
56199+ goto twl_interrupt_bail;
56200+
56201+ handled = 1;
56202+
56203+ /* If we are resetting, bail */
56204+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
56205+ goto twl_interrupt_bail;
56206+
56207+ /* Attention interrupt */
56208+ if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
56209+ if (twl_handle_attention_interrupt(tw_dev)) {
56210+ TWL_MASK_INTERRUPTS(tw_dev);
56211+ goto twl_interrupt_bail;
56212+ }
56213+ }
56214+
56215+ /* Response interrupt */
56216+ while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
56217+ if (sizeof(dma_addr_t) > 4) {
56218+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
56219+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56220+ mfa = ((u64)regh << 32) | regl;
56221+ } else
56222+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56223+
56224+ error = 0;
56225+ response = (u32)mfa;
56226+
56227+ /* Check for command packet error */
56228+ if (!TW_NOTMFA_OUT(response)) {
56229+ for (i=0;i<TW_Q_LENGTH;i++) {
56230+ if (tw_dev->sense_buffer_phys[i] == mfa) {
56231+ request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
56232+ if (tw_dev->srb[request_id] != NULL)
56233+ error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
56234+ else {
56235+ /* Skip ioctl error prints */
56236+ if (request_id != tw_dev->chrdev_request_id)
56237+ error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
56238+ else
56239+ memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
56240+ }
56241+
56242+ /* Now re-post the sense buffer */
56243+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56244+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56245+ break;
56246+ }
56247+ }
56248+ } else
56249+ request_id = TW_RESID_OUT(response);
56250+
56251+ full_command_packet = tw_dev->command_packet_virt[request_id];
56252+
56253+ /* Check for correct state */
56254+ if (tw_dev->state[request_id] != TW_S_POSTED) {
56255+ if (tw_dev->srb[request_id] != NULL) {
56256+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
56257+ TWL_MASK_INTERRUPTS(tw_dev);
56258+ goto twl_interrupt_bail;
56259+ }
56260+ }
56261+
56262+ /* Check for internal command completion */
56263+ if (tw_dev->srb[request_id] == NULL) {
56264+ if (request_id != tw_dev->chrdev_request_id) {
56265+ if (twl_aen_complete(tw_dev, request_id))
56266+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
56267+ } else {
56268+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56269+ wake_up(&tw_dev->ioctl_wqueue);
56270+ }
56271+ } else {
56272+ cmd = tw_dev->srb[request_id];
56273+
56274+ if (!error)
56275+ cmd->result = (DID_OK << 16);
56276+
56277+ /* Report residual bytes for single sgl */
56278+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
56279+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
56280+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
56281+ }
56282+
56283+ /* Now complete the io */
56284+ tw_dev->state[request_id] = TW_S_COMPLETED;
56285+ twl_free_request_id(tw_dev, request_id);
56286+ tw_dev->posted_request_count--;
56287+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
56288+ twl_unmap_scsi_data(tw_dev, request_id);
56289+ }
56290+
56291+ /* Check for another response interrupt */
56292+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56293+ }
56294+
56295+twl_interrupt_bail:
56296+ spin_unlock(tw_dev->host->host_lock);
56297+ return IRQ_RETVAL(handled);
56298+} /* End twl_interrupt() */
56299+
56300+/* This function will poll for a register change */
56301+static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
56302+{
56303+ unsigned long before;
56304+ int retval = 1;
56305+ u32 reg_value;
56306+
56307+ reg_value = readl(reg);
56308+ before = jiffies;
56309+
56310+ while ((reg_value & value) != result) {
56311+ reg_value = readl(reg);
56312+ if (time_after(jiffies, before + HZ * seconds))
56313+ goto out;
56314+ msleep(50);
56315+ }
56316+ retval = 0;
56317+out:
56318+ return retval;
56319+} /* End twl_poll_register() */
56320+
56321+/* This function will reset a controller */
56322+static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
56323+{
56324+ int retval = 1;
56325+ int i = 0;
56326+ u32 status = 0;
56327+ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
56328+ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
56329+ u32 init_connect_result = 0;
56330+ int tries = 0;
56331+ int do_soft_reset = soft_reset;
56332+
56333+ while (tries < TW_MAX_RESET_TRIES) {
56334+ /* Do a soft reset if one is needed */
56335+ if (do_soft_reset) {
56336+ TWL_SOFT_RESET(tw_dev);
56337+
56338+ /* Make sure controller is in a good state */
56339+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
56340+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
56341+ tries++;
56342+ continue;
56343+ }
56344+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
56345+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
56346+ tries++;
56347+ continue;
56348+ }
56349+ }
56350+
56351+ /* Initconnect */
56352+ if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
56353+ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
56354+ TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
56355+ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
56356+ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
56357+ &fw_on_ctlr_build, &init_connect_result)) {
56358+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
56359+ do_soft_reset = 1;
56360+ tries++;
56361+ continue;
56362+ }
56363+
56364+ /* Load sense buffers */
56365+ while (i < TW_Q_LENGTH) {
56366+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56367+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56368+
56369+ /* Check status for over-run after each write */
56370+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56371+ if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
56372+ i++;
56373+ }
56374+
56375+ /* Now check status */
56376+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56377+ if (status) {
56378+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
56379+ do_soft_reset = 1;
56380+ tries++;
56381+ continue;
56382+ }
56383+
56384+ /* Drain the AEN queue */
56385+ if (twl_aen_drain_queue(tw_dev, soft_reset)) {
56386+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
56387+ do_soft_reset = 1;
56388+ tries++;
56389+ continue;
56390+ }
56391+
56392+ /* Load rest of compatibility struct */
56393+ strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
56394+ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
56395+ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
56396+ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
56397+ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
56398+ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
56399+ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
56400+ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
56401+ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
56402+ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
56403+
56404+ /* If we got here, controller is in a good state */
56405+ retval = 0;
56406+ goto out;
56407+ }
56408+out:
56409+ return retval;
56410+} /* End twl_reset_sequence() */
56411+
56412+/* This function will reset a device extension */
56413+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
56414+{
56415+ int i = 0, retval = 1;
56416+ unsigned long flags = 0;
56417+
56418+ /* Block SCSI requests while we are resetting */
56419+ if (ioctl_reset)
56420+ scsi_block_requests(tw_dev->host);
56421+
56422+ set_bit(TW_IN_RESET, &tw_dev->flags);
56423+ TWL_MASK_INTERRUPTS(tw_dev);
56424+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56425+
56426+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
56427+
56428+ /* Abort all requests that are in progress */
56429+ for (i = 0; i < TW_Q_LENGTH; i++) {
56430+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
56431+ (tw_dev->state[i] != TW_S_INITIAL) &&
56432+ (tw_dev->state[i] != TW_S_COMPLETED)) {
56433+ if (tw_dev->srb[i]) {
56434+ tw_dev->srb[i]->result = (DID_RESET << 16);
56435+ tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
56436+ twl_unmap_scsi_data(tw_dev, i);
56437+ }
56438+ }
56439+ }
56440+
56441+ /* Reset queues and counts */
56442+ for (i = 0; i < TW_Q_LENGTH; i++) {
56443+ tw_dev->free_queue[i] = i;
56444+ tw_dev->state[i] = TW_S_INITIAL;
56445+ }
56446+ tw_dev->free_head = TW_Q_START;
56447+ tw_dev->free_tail = TW_Q_START;
56448+ tw_dev->posted_request_count = 0;
56449+
56450+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
56451+
56452+ if (twl_reset_sequence(tw_dev, 1))
56453+ goto out;
56454+
56455+ TWL_UNMASK_INTERRUPTS(tw_dev);
56456+
56457+ clear_bit(TW_IN_RESET, &tw_dev->flags);
56458+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56459+
56460+ retval = 0;
56461+out:
56462+ if (ioctl_reset)
56463+ scsi_unblock_requests(tw_dev->host);
56464+ return retval;
56465+} /* End twl_reset_device_extension() */
56466+
56467+/* This funciton returns unit geometry in cylinders/heads/sectors */
56468+static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
56469+{
56470+ int heads, sectors, cylinders;
56471+ TW_Device_Extension *tw_dev;
56472+
56473+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
56474+
56475+ if (capacity >= 0x200000) {
56476+ heads = 255;
56477+ sectors = 63;
56478+ cylinders = sector_div(capacity, heads * sectors);
56479+ } else {
56480+ heads = 64;
56481+ sectors = 32;
56482+ cylinders = sector_div(capacity, heads * sectors);
56483+ }
56484+
56485+ geom[0] = heads;
56486+ geom[1] = sectors;
56487+ geom[2] = cylinders;
56488+
56489+ return 0;
56490+} /* End twl_scsi_biosparam() */
56491+
56492+/* This is the new scsi eh reset function */
56493+static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
56494+{
56495+ TW_Device_Extension *tw_dev = NULL;
56496+ int retval = FAILED;
56497+
56498+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56499+
56500+ tw_dev->num_resets++;
56501+
56502+ sdev_printk(KERN_WARNING, SCpnt->device,
56503+ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
56504+ TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
56505+
56506+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
56507+ mutex_lock(&tw_dev->ioctl_lock);
56508+
56509+ /* Now reset the card and some of the device extension data */
56510+ if (twl_reset_device_extension(tw_dev, 0)) {
56511+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
56512+ goto out;
56513+ }
56514+
56515+ retval = SUCCESS;
56516+out:
56517+ mutex_unlock(&tw_dev->ioctl_lock);
56518+ return retval;
56519+} /* End twl_scsi_eh_reset() */
56520+
56521+/* This is the main scsi queue function to handle scsi opcodes */
56522+static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
56523+{
56524+ int request_id, retval;
56525+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56526+
56527+ /* If we are resetting due to timed out ioctl, report as busy */
56528+ if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
56529+ retval = SCSI_MLQUEUE_HOST_BUSY;
56530+ goto out;
56531+ }
56532+
56533+ /* Save done function into scsi_cmnd struct */
56534+ SCpnt->scsi_done = done;
56535+
56536+ /* Get a free request id */
56537+ twl_get_request_id(tw_dev, &request_id);
56538+
56539+ /* Save the scsi command for use by the ISR */
56540+ tw_dev->srb[request_id] = SCpnt;
56541+
56542+ /* Initialize phase to zero */
56543+ SCpnt->SCp.phase = TW_PHASE_INITIAL;
56544+
56545+ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
56546+ if (retval) {
56547+ tw_dev->state[request_id] = TW_S_COMPLETED;
56548+ twl_free_request_id(tw_dev, request_id);
56549+ SCpnt->result = (DID_ERROR << 16);
56550+ done(SCpnt);
56551+ retval = 0;
56552+ }
56553+out:
56554+ return retval;
56555+} /* End twl_scsi_queue() */
56556+
56557+/* This function tells the controller to shut down */
56558+static void __twl_shutdown(TW_Device_Extension *tw_dev)
56559+{
56560+ /* Disable interrupts */
56561+ TWL_MASK_INTERRUPTS(tw_dev);
56562+
56563+ /* Free up the IRQ */
56564+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56565+
56566+ printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
56567+
56568+ /* Tell the card we are shutting down */
56569+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56570+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
56571+ } else {
56572+ printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
56573+ }
56574+
56575+ /* Clear doorbell interrupt just before exit */
56576+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56577+} /* End __twl_shutdown() */
56578+
56579+/* Wrapper for __twl_shutdown */
56580+static void twl_shutdown(struct pci_dev *pdev)
56581+{
56582+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56583+ TW_Device_Extension *tw_dev;
56584+
56585+ if (!host)
56586+ return;
56587+
56588+ tw_dev = (TW_Device_Extension *)host->hostdata;
56589+
56590+ if (tw_dev->online)
56591+ __twl_shutdown(tw_dev);
56592+} /* End twl_shutdown() */
56593+
56594+/* This function configures unit settings when a unit is coming on-line */
56595+static int twl_slave_configure(struct scsi_device *sdev)
56596+{
56597+ /* Force 60 second timeout */
56598+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
56599+
56600+ return 0;
56601+} /* End twl_slave_configure() */
56602+
56603+/* scsi_host_template initializer */
56604+static struct scsi_host_template driver_template = {
56605+ .module = THIS_MODULE,
56606+ .name = "3w-sas",
56607+ .queuecommand = twl_scsi_queue,
56608+ .eh_host_reset_handler = twl_scsi_eh_reset,
56609+ .bios_param = twl_scsi_biosparam,
56610+ .change_queue_depth = twl_change_queue_depth,
56611+ .can_queue = TW_Q_LENGTH-2,
56612+ .slave_configure = twl_slave_configure,
56613+ .this_id = -1,
56614+ .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
56615+ .max_sectors = TW_MAX_SECTORS,
56616+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
56617+ .use_clustering = ENABLE_CLUSTERING,
56618+ .shost_attrs = twl_host_attrs,
56619+ .emulated = 1
56620+};
56621+
56622+/* This function will probe and initialize a card */
56623+static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
56624+{
56625+ struct Scsi_Host *host = NULL;
56626+ TW_Device_Extension *tw_dev;
56627+ resource_size_t mem_addr, mem_len;
56628+ int retval = -ENODEV;
56629+ int *ptr_phycount, phycount=0;
56630+
56631+ retval = pci_enable_device(pdev);
56632+ if (retval) {
56633+ TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
56634+ goto out_disable_device;
56635+ }
56636+
56637+ pci_set_master(pdev);
56638+ pci_try_set_mwi(pdev);
56639+
56640+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56641+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56642+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56643+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56644+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
56645+ retval = -ENODEV;
56646+ goto out_disable_device;
56647+ }
56648+
56649+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
56650+ if (!host) {
56651+ TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
56652+ retval = -ENOMEM;
56653+ goto out_disable_device;
56654+ }
56655+ tw_dev = (TW_Device_Extension *)host->hostdata;
56656+
56657+ /* Save values to device extension */
56658+ tw_dev->host = host;
56659+ tw_dev->tw_pci_dev = pdev;
56660+
56661+ if (twl_initialize_device_extension(tw_dev)) {
56662+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
56663+ goto out_free_device_extension;
56664+ }
56665+
56666+ /* Request IO regions */
56667+ retval = pci_request_regions(pdev, "3w-sas");
56668+ if (retval) {
56669+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
56670+ goto out_free_device_extension;
56671+ }
56672+
56673+ /* Use region 1 */
56674+ mem_addr = pci_resource_start(pdev, 1);
56675+ mem_len = pci_resource_len(pdev, 1);
56676+
56677+ /* Save base address */
56678+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
56679+
56680+ if (!tw_dev->base_addr) {
56681+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
56682+ goto out_release_mem_region;
56683+ }
56684+
56685+ /* Disable interrupts on the card */
56686+ TWL_MASK_INTERRUPTS(tw_dev);
56687+
56688+ /* Initialize the card */
56689+ if (twl_reset_sequence(tw_dev, 0)) {
56690+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
56691+ goto out_iounmap;
56692+ }
56693+
56694+ /* Set host specific parameters */
56695+ host->max_id = TW_MAX_UNITS;
56696+ host->max_cmd_len = TW_MAX_CDB_LEN;
56697+ host->max_lun = TW_MAX_LUNS;
56698+ host->max_channel = 0;
56699+
56700+ /* Register the card with the kernel SCSI layer */
56701+ retval = scsi_add_host(host, &pdev->dev);
56702+ if (retval) {
56703+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
56704+ goto out_iounmap;
56705+ }
56706+
56707+ pci_set_drvdata(pdev, host);
56708+
56709+ printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
56710+ host->host_no,
56711+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56712+ TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
56713+ (u64)mem_addr, pdev->irq);
56714+
56715+ ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
56716+ TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
56717+ if (ptr_phycount)
56718+ phycount = le32_to_cpu(*(int *)ptr_phycount);
56719+
56720+ printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
56721+ host->host_no,
56722+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56723+ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
56724+ (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
56725+ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
56726+ phycount);
56727+
56728+ /* Try to enable MSI */
56729+ if (use_msi && !pci_enable_msi(pdev))
56730+ set_bit(TW_USING_MSI, &tw_dev->flags);
56731+
56732+ /* Now setup the interrupt handler */
56733+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56734+ if (retval) {
56735+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
56736+ goto out_remove_host;
56737+ }
56738+
56739+ twl_device_extension_list[twl_device_extension_count] = tw_dev;
56740+ twl_device_extension_count++;
56741+
56742+ /* Re-enable interrupts on the card */
56743+ TWL_UNMASK_INTERRUPTS(tw_dev);
56744+
56745+ /* Finally, scan the host */
56746+ scsi_scan_host(host);
56747+
56748+ /* Add sysfs binary files */
56749+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
56750+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
56751+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
56752+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
56753+
56754+ if (twl_major == -1) {
56755+ if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
56756+ TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
56757+ }
56758+ tw_dev->online = 1;
56759+ return 0;
56760+
56761+out_remove_host:
56762+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56763+ pci_disable_msi(pdev);
56764+ scsi_remove_host(host);
56765+out_iounmap:
56766+ iounmap(tw_dev->base_addr);
56767+out_release_mem_region:
56768+ pci_release_regions(pdev);
56769+out_free_device_extension:
56770+ twl_free_device_extension(tw_dev);
56771+ scsi_host_put(host);
56772+out_disable_device:
56773+ pci_disable_device(pdev);
56774+
56775+ return retval;
56776+} /* End twl_probe() */
56777+
56778+/* This function is called to remove a device */
56779+static void twl_remove(struct pci_dev *pdev)
56780+{
56781+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56782+ TW_Device_Extension *tw_dev;
56783+
56784+ if (!host)
56785+ return;
56786+
56787+ tw_dev = (TW_Device_Extension *)host->hostdata;
56788+
56789+ if (!tw_dev->online)
56790+ return;
56791+
56792+ /* Remove sysfs binary files */
56793+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
56794+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
56795+
56796+ scsi_remove_host(tw_dev->host);
56797+
56798+ /* Unregister character device */
56799+ if (twl_major >= 0) {
56800+ unregister_chrdev(twl_major, "twl");
56801+ twl_major = -1;
56802+ }
56803+
56804+ /* Shutdown the card */
56805+ __twl_shutdown(tw_dev);
56806+
56807+ /* Disable MSI if enabled */
56808+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56809+ pci_disable_msi(pdev);
56810+
56811+ /* Free IO remapping */
56812+ iounmap(tw_dev->base_addr);
56813+
56814+ /* Free up the mem region */
56815+ pci_release_regions(pdev);
56816+
56817+ /* Free up device extension resources */
56818+ twl_free_device_extension(tw_dev);
56819+
56820+ scsi_host_put(tw_dev->host);
56821+ pci_disable_device(pdev);
56822+ twl_device_extension_count--;
56823+} /* End twl_remove() */
56824+
56825+#ifdef CONFIG_PM
56826+/* This function is called on PCI suspend */
56827+static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
56828+{
56829+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56830+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56831+
56832+ printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
56833+ /* Disable interrupts */
56834+ TWL_MASK_INTERRUPTS(tw_dev);
56835+
56836+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56837+
56838+ /* Tell the card we are shutting down */
56839+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56840+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
56841+ } else {
56842+ printk(KERN_WARNING "3w-sas: Suspend complete.\n");
56843+ }
56844+
56845+ /* Clear doorbell interrupt */
56846+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56847+
56848+ pci_save_state(pdev);
56849+ pci_disable_device(pdev);
56850+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
56851+
56852+ return 0;
56853+} /* End twl_suspend() */
56854+
56855+/* This function is called on PCI resume */
56856+static int twl_resume(struct pci_dev *pdev)
56857+{
56858+ int retval = 0;
56859+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56860+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56861+
56862+ printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
56863+ pci_set_power_state(pdev, PCI_D0);
56864+ pci_enable_wake(pdev, PCI_D0, 0);
56865+ pci_restore_state(pdev);
56866+
56867+ retval = pci_enable_device(pdev);
56868+ if (retval) {
56869+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
56870+ return retval;
56871+ }
56872+
56873+ pci_set_master(pdev);
56874+ pci_try_set_mwi(pdev);
56875+
56876+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56877+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56878+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56879+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56880+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
56881+ retval = -ENODEV;
56882+ goto out_disable_device;
56883+ }
56884+
56885+ /* Initialize the card */
56886+ if (twl_reset_sequence(tw_dev, 0)) {
56887+ retval = -ENODEV;
56888+ goto out_disable_device;
56889+ }
56890+
56891+ /* Now setup the interrupt handler */
56892+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56893+ if (retval) {
56894+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
56895+ retval = -ENODEV;
56896+ goto out_disable_device;
56897+ }
56898+
56899+ /* Now enable MSI if enabled */
56900+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56901+ pci_enable_msi(pdev);
56902+
56903+ /* Re-enable interrupts on the card */
56904+ TWL_UNMASK_INTERRUPTS(tw_dev);
56905+
56906+ printk(KERN_WARNING "3w-sas: Resume complete.\n");
56907+ return 0;
56908+
56909+out_disable_device:
56910+ scsi_remove_host(host);
56911+ pci_disable_device(pdev);
56912+
56913+ return retval;
56914+} /* End twl_resume() */
56915+#endif
56916+
56917+/* PCI Devices supported by this driver */
56918+static struct pci_device_id twl_pci_tbl[] __devinitdata = {
56919+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9750,
56920+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56921+ { }
56922+};
56923+MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
56924+
56925+/* pci_driver initializer */
56926+static struct pci_driver twl_driver = {
56927+ .name = "3w-sas",
56928+ .id_table = twl_pci_tbl,
56929+ .probe = twl_probe,
56930+ .remove = twl_remove,
56931+#ifdef CONFIG_PM
56932+ .suspend = twl_suspend,
56933+ .resume = twl_resume,
56934+#endif
56935+ .shutdown = twl_shutdown
56936+};
56937+
56938+/* This function is called on driver initialization */
56939+static int __init twl_init(void)
56940+{
56941+ printk(KERN_WARNING "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
56942+
56943+ return pci_register_driver(&twl_driver);
56944+} /* End twl_init() */
56945+
56946+/* This function is called on driver exit */
56947+static void __exit twl_exit(void)
56948+{
56949+ pci_unregister_driver(&twl_driver);
56950+} /* End twl_exit() */
56951+
56952+module_init(twl_init);
56953+module_exit(twl_exit);
56954+
56955diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
56956new file mode 100644
56957index 0000000..e620505
56958--- /dev/null
56959+++ b/drivers/scsi/3w-sas.h
56960@@ -0,0 +1,396 @@
56961+/*
56962+ 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
56963+
56964+ Written By: Adam Radford <linuxraid@lsi.com>
56965+
56966+ Copyright (C) 2009 LSI Corporation.
56967+
56968+ This program is free software; you can redistribute it and/or modify
56969+ it under the terms of the GNU General Public License as published by
56970+ the Free Software Foundation; version 2 of the License.
56971+
56972+ This program is distributed in the hope that it will be useful,
56973+ but WITHOUT ANY WARRANTY; without even the implied warranty of
56974+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
56975+ GNU General Public License for more details.
56976+
56977+ NO WARRANTY
56978+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
56979+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
56980+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
56981+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
56982+ solely responsible for determining the appropriateness of using and
56983+ distributing the Program and assumes all risks associated with its
56984+ exercise of rights under this Agreement, including but not limited to
56985+ the risks and costs of program errors, damage to or loss of data,
56986+ programs or equipment, and unavailability or interruption of operations.
56987+
56988+ DISCLAIMER OF LIABILITY
56989+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56990+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56991+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
56992+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
56993+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
56994+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
56995+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
56996+
56997+ You should have received a copy of the GNU General Public License
56998+ along with this program; if not, write to the Free Software
56999+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
57000+
57001+ Bugs/Comments/Suggestions should be mailed to:
57002+ linuxraid@lsi.com
57003+
57004+ For more information, goto:
57005+ http://www.lsi.com
57006+*/
57007+
57008+#ifndef _3W_SAS_H
57009+#define _3W_SAS_H
57010+
57011+/* AEN severity table */
57012+static char *twl_aen_severity_table[] =
57013+{
57014+ "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0
57015+};
57016+
57017+/* Liberator register offsets */
57018+#define TWL_STATUS 0x0 /* Status */
57019+#define TWL_HIBDB 0x20 /* Inbound doorbell */
57020+#define TWL_HISTAT 0x30 /* Host interrupt status */
57021+#define TWL_HIMASK 0x34 /* Host interrupt mask */
57022+#define TWL_HOBDB 0x9C /* Outbound doorbell */
57023+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
57024+#define TWL_SCRPD3 0xBC /* Scratchpad */
57025+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
57026+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
57027+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
57028+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
57029+#define TWL_HISTATUS_VALID_INTERRUPT 0xC
57030+#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
57031+#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
57032+#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
57033+#define TWL_ISSUE_SOFT_RESET 0x100
57034+#define TWL_CONTROLLER_READY 0x2000
57035+#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
57036+#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
57037+#define TWL_PULL_MODE 0x1
57038+
57039+/* Command packet opcodes used by the driver */
57040+#define TW_OP_INIT_CONNECTION 0x1
57041+#define TW_OP_GET_PARAM 0x12
57042+#define TW_OP_SET_PARAM 0x13
57043+#define TW_OP_EXECUTE_SCSI 0x10
57044+
57045+/* Asynchronous Event Notification (AEN) codes used by the driver */
57046+#define TW_AEN_QUEUE_EMPTY 0x0000
57047+#define TW_AEN_SOFT_RESET 0x0001
57048+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
57049+#define TW_AEN_SEVERITY_ERROR 0x1
57050+#define TW_AEN_SEVERITY_DEBUG 0x4
57051+#define TW_AEN_NOT_RETRIEVED 0x1
57052+
57053+/* Command state defines */
57054+#define TW_S_INITIAL 0x1 /* Initial state */
57055+#define TW_S_STARTED 0x2 /* Id in use */
57056+#define TW_S_POSTED 0x4 /* Posted to the controller */
57057+#define TW_S_COMPLETED 0x8 /* Completed by isr */
57058+#define TW_S_FINISHED 0x10 /* I/O completely done */
57059+
57060+/* Compatibility defines */
57061+#define TW_9750_ARCH_ID 10
57062+#define TW_CURRENT_DRIVER_SRL 40
57063+#define TW_CURRENT_DRIVER_BUILD 0
57064+#define TW_CURRENT_DRIVER_BRANCH 0
57065+
57066+/* Phase defines */
57067+#define TW_PHASE_INITIAL 0
57068+#define TW_PHASE_SGLIST 2
57069+
57070+/* Misc defines */
57071+#define TW_SECTOR_SIZE 512
57072+#define TW_MAX_UNITS 32
57073+#define TW_INIT_MESSAGE_CREDITS 0x100
57074+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
57075+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
57076+#define TW_EXTENDED_INIT_CONNECT 0x2
57077+#define TW_BASE_FW_SRL 24
57078+#define TW_BASE_FW_BRANCH 0
57079+#define TW_BASE_FW_BUILD 1
57080+#define TW_Q_LENGTH 256
57081+#define TW_Q_START 0
57082+#define TW_MAX_SLOT 32
57083+#define TW_MAX_RESET_TRIES 2
57084+#define TW_MAX_CMDS_PER_LUN 254
57085+#define TW_MAX_AEN_DRAIN 255
57086+#define TW_IN_RESET 2
57087+#define TW_USING_MSI 3
57088+#define TW_IN_ATTENTION_LOOP 4
57089+#define TW_MAX_SECTORS 256
57090+#define TW_MAX_CDB_LEN 16
57091+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
57092+#define TW_IOCTL_CHRDEV_FREE -1
57093+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
57094+#define TW_VERSION_TABLE 0x0402
57095+#define TW_TIMEKEEP_TABLE 0x040A
57096+#define TW_INFORMATION_TABLE 0x0403
57097+#define TW_PARAM_FWVER 3
57098+#define TW_PARAM_FWVER_LENGTH 16
57099+#define TW_PARAM_BIOSVER 4
57100+#define TW_PARAM_BIOSVER_LENGTH 16
57101+#define TW_PARAM_MODEL 8
57102+#define TW_PARAM_MODEL_LENGTH 16
57103+#define TW_PARAM_PHY_SUMMARY_TABLE 1
57104+#define TW_PARAM_PHYCOUNT 2
57105+#define TW_PARAM_PHYCOUNT_LENGTH 1
57106+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
57107+#define TW_ALLOCATION_LENGTH 128
57108+#define TW_SENSE_DATA_LENGTH 18
57109+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
57110+#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
57111+#define TW_ERROR_UNIT_OFFLINE 0x128
57112+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
57113+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
57114+#define TW_DRIVER 6
57115+#ifndef PCI_DEVICE_ID_3WARE_9750
57116+#define PCI_DEVICE_ID_3WARE_9750 0x1010
57117+#endif
57118+
57119+/* Bitmask macros to eliminate bitfields */
57120+
57121+/* opcode: 5, reserved: 3 */
57122+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
57123+#define TW_OP_OUT(x) (x & 0x1f)
57124+
57125+/* opcode: 5, sgloffset: 3 */
57126+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
57127+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
57128+
57129+/* severity: 3, reserved: 5 */
57130+#define TW_SEV_OUT(x) (x & 0x7)
57131+
57132+/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
57133+#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
57134+#define TW_NOTMFA_OUT(x) (x & 0x1)
57135+
57136+/* request_id: 12, lun: 4 */
57137+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
57138+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
57139+
57140+/* Register access macros */
57141+#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
57142+#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
57143+#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
57144+#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
57145+#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
57146+#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
57147+#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
57148+#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
57149+#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
57150+#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
57151+#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
57152+#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
57153+#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
57154+#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
57155+#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
57156+
57157+/* Macros */
57158+#define TW_PRINTK(h,a,b,c) { \
57159+if (h) \
57160+printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
57161+else \
57162+printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
57163+}
57164+#define TW_MAX_LUNS 16
57165+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
57166+#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
57167+#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
57168+#define TW_PADDING_LENGTH_LIBERATOR 136
57169+#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
57170+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
57171+
57172+#pragma pack(1)
57173+
57174+/* SGL entry */
57175+typedef struct TAG_TW_SG_Entry_ISO {
57176+ dma_addr_t address;
57177+ dma_addr_t length;
57178+} TW_SG_Entry_ISO;
57179+
57180+/* Old Command Packet with ISO SGL */
57181+typedef struct TW_Command {
57182+ unsigned char opcode__sgloffset;
57183+ unsigned char size;
57184+ unsigned char request_id;
57185+ unsigned char unit__hostid;
57186+ /* Second DWORD */
57187+ unsigned char status;
57188+ unsigned char flags;
57189+ union {
57190+ unsigned short block_count;
57191+ unsigned short parameter_count;
57192+ } byte6_offset;
57193+ union {
57194+ struct {
57195+ u32 lba;
57196+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57197+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
57198+ } io;
57199+ struct {
57200+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57201+ u32 padding;
57202+ unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
57203+ } param;
57204+ } byte8_offset;
57205+} TW_Command;
57206+
57207+/* New Command Packet with ISO SGL */
57208+typedef struct TAG_TW_Command_Apache {
57209+ unsigned char opcode__reserved;
57210+ unsigned char unit;
57211+ unsigned short request_id__lunl;
57212+ unsigned char status;
57213+ unsigned char sgl_offset;
57214+ unsigned short sgl_entries__lunh;
57215+ unsigned char cdb[16];
57216+ TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
57217+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
57218+} TW_Command_Apache;
57219+
57220+/* New command packet header */
57221+typedef struct TAG_TW_Command_Apache_Header {
57222+ unsigned char sense_data[TW_SENSE_DATA_LENGTH];
57223+ struct {
57224+ char reserved[4];
57225+ unsigned short error;
57226+ unsigned char padding;
57227+ unsigned char severity__reserved;
57228+ } status_block;
57229+ unsigned char err_specific_desc[98];
57230+ struct {
57231+ unsigned char size_header;
57232+ unsigned short request_id;
57233+ unsigned char size_sense;
57234+ } header_desc;
57235+} TW_Command_Apache_Header;
57236+
57237+/* This struct is a union of the 2 command packets */
57238+typedef struct TAG_TW_Command_Full {
57239+ TW_Command_Apache_Header header;
57240+ union {
57241+ TW_Command oldcommand;
57242+ TW_Command_Apache newcommand;
57243+ } command;
57244+} TW_Command_Full;
57245+
57246+/* Initconnection structure */
57247+typedef struct TAG_TW_Initconnect {
57248+ unsigned char opcode__reserved;
57249+ unsigned char size;
57250+ unsigned char request_id;
57251+ unsigned char res2;
57252+ unsigned char status;
57253+ unsigned char flags;
57254+ unsigned short message_credits;
57255+ u32 features;
57256+ unsigned short fw_srl;
57257+ unsigned short fw_arch_id;
57258+ unsigned short fw_branch;
57259+ unsigned short fw_build;
57260+ u32 result;
57261+} TW_Initconnect;
57262+
57263+/* Event info structure */
57264+typedef struct TAG_TW_Event
57265+{
57266+ unsigned int sequence_id;
57267+ unsigned int time_stamp_sec;
57268+ unsigned short aen_code;
57269+ unsigned char severity;
57270+ unsigned char retrieved;
57271+ unsigned char repeat_count;
57272+ unsigned char parameter_len;
57273+ unsigned char parameter_data[98];
57274+} TW_Event;
57275+
57276+typedef struct TAG_TW_Ioctl_Driver_Command {
57277+ unsigned int control_code;
57278+ unsigned int status;
57279+ unsigned int unique_id;
57280+ unsigned int sequence_id;
57281+ unsigned int os_specific;
57282+ unsigned int buffer_length;
57283+} TW_Ioctl_Driver_Command;
57284+
57285+typedef struct TAG_TW_Ioctl_Apache {
57286+ TW_Ioctl_Driver_Command driver_command;
57287+ char padding[488];
57288+ TW_Command_Full firmware_command;
57289+ char data_buffer[1];
57290+} TW_Ioctl_Buf_Apache;
57291+
57292+/* GetParam descriptor */
57293+typedef struct {
57294+ unsigned short table_id;
57295+ unsigned short parameter_id;
57296+ unsigned short parameter_size_bytes;
57297+ unsigned short actual_parameter_size_bytes;
57298+ unsigned char data[1];
57299+} TW_Param_Apache;
57300+
57301+/* Compatibility information structure */
57302+typedef struct TAG_TW_Compatibility_Info
57303+{
57304+ char driver_version[32];
57305+ unsigned short working_srl;
57306+ unsigned short working_branch;
57307+ unsigned short working_build;
57308+ unsigned short driver_srl_high;
57309+ unsigned short driver_branch_high;
57310+ unsigned short driver_build_high;
57311+ unsigned short driver_srl_low;
57312+ unsigned short driver_branch_low;
57313+ unsigned short driver_build_low;
57314+ unsigned short fw_on_ctlr_srl;
57315+ unsigned short fw_on_ctlr_branch;
57316+ unsigned short fw_on_ctlr_build;
57317+} TW_Compatibility_Info;
57318+
57319+#pragma pack()
57320+
57321+typedef struct TAG_TW_Device_Extension {
57322+ void __iomem *base_addr;
57323+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
57324+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
57325+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
57326+ dma_addr_t command_packet_phys[TW_Q_LENGTH];
57327+ TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
57328+ dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
57329+ struct pci_dev *tw_pci_dev;
57330+ struct scsi_cmnd *srb[TW_Q_LENGTH];
57331+ unsigned char free_queue[TW_Q_LENGTH];
57332+ unsigned char free_head;
57333+ unsigned char free_tail;
57334+ int state[TW_Q_LENGTH];
57335+ unsigned int posted_request_count;
57336+ unsigned int max_posted_request_count;
57337+ unsigned int max_sgl_entries;
57338+ unsigned int sgl_entries;
57339+ unsigned int num_resets;
57340+ unsigned int sector_count;
57341+ unsigned int max_sector_count;
57342+ unsigned int aen_count;
57343+ struct Scsi_Host *host;
57344+ long flags;
57345+ TW_Event *event_queue[TW_Q_LENGTH];
57346+ unsigned char error_index;
57347+ unsigned int error_sequence_id;
57348+ int chrdev_request_id;
57349+ wait_queue_head_t ioctl_wqueue;
57350+ struct mutex ioctl_lock;
57351+ TW_Compatibility_Info tw_compat_info;
57352+ char online;
57353+} TW_Device_Extension;
57354+
57355+#endif /* _3W_SAS_H */
57356+
57357diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
57358index 1ddcf40..a85f062 100644
57359--- a/drivers/scsi/BusLogic.c
57360+++ b/drivers/scsi/BusLogic.c
57361@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
57362 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
57363 *PrototypeHostAdapter)
57364 {
57365+ pax_track_stack();
57366+
57367 /*
57368 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
57369 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
57370diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
57371index e11cca4..4295679 100644
57372--- a/drivers/scsi/Kconfig
57373+++ b/drivers/scsi/Kconfig
57374@@ -399,6 +399,17 @@ config SCSI_3W_9XXX
57375 Please read the comments at the top of
57376 <file:drivers/scsi/3w-9xxx.c>.
57377
57378+config SCSI_3W_SAS
57379+ tristate "3ware 97xx SAS/SATA-RAID support"
57380+ depends on PCI && SCSI
57381+ help
57382+ This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
57383+
57384+ <http://www.lsi.com>
57385+
57386+ Please read the comments at the top of
57387+ <file:drivers/scsi/3w-sas.c>.
57388+
57389 config SCSI_7000FASST
57390 tristate "7000FASST SCSI support"
57391 depends on ISA && SCSI && ISA_DMA_API
57392@@ -621,6 +632,14 @@ config SCSI_FLASHPOINT
57393 substantial, so users of MultiMaster Host Adapters may not
57394 wish to include it.
57395
57396+config VMWARE_PVSCSI
57397+ tristate "VMware PVSCSI driver support"
57398+ depends on PCI && SCSI && X86
57399+ help
57400+ This driver supports VMware's para virtualized SCSI HBA.
57401+ To compile this driver as a module, choose M here: the
57402+ module will be called vmw_pvscsi.
57403+
57404 config LIBFC
57405 tristate "LibFC module"
57406 select SCSI_FC_ATTRS
57407diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
57408index 3ad61db..c938975 100644
57409--- a/drivers/scsi/Makefile
57410+++ b/drivers/scsi/Makefile
57411@@ -113,6 +113,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o
57412 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
57413 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
57414 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
57415+obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
57416 obj-$(CONFIG_SCSI_PPA) += ppa.o
57417 obj-$(CONFIG_SCSI_IMM) += imm.o
57418 obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
57419@@ -133,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
57420 obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
57421 obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
57422 obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
57423+obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
57424
57425 obj-$(CONFIG_ARM) += arm/
57426
57427diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
57428index cdbdec9..b7d560b 100644
57429--- a/drivers/scsi/aacraid/aacraid.h
57430+++ b/drivers/scsi/aacraid/aacraid.h
57431@@ -471,7 +471,7 @@ struct adapter_ops
57432 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
57433 /* Administrative operations */
57434 int (*adapter_comm)(struct aac_dev * dev, int comm);
57435-};
57436+} __no_const;
57437
57438 /*
57439 * Define which interrupt handler needs to be installed
57440diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
57441index a5b8e7b..a6a0e43 100644
57442--- a/drivers/scsi/aacraid/commctrl.c
57443+++ b/drivers/scsi/aacraid/commctrl.c
57444@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
57445 u32 actual_fibsize64, actual_fibsize = 0;
57446 int i;
57447
57448+ pax_track_stack();
57449
57450 if (dev->in_reset) {
57451 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
57452diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
57453index 9b97c3e..f099725 100644
57454--- a/drivers/scsi/aacraid/linit.c
57455+++ b/drivers/scsi/aacraid/linit.c
57456@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
57457 #elif defined(__devinitconst)
57458 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57459 #else
57460-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
57461+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57462 #endif
57463 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
57464 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
57465diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
57466index 996f722..9127845 100644
57467--- a/drivers/scsi/aic94xx/aic94xx_init.c
57468+++ b/drivers/scsi/aic94xx/aic94xx_init.c
57469@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
57470 flash_error_table[i].reason);
57471 }
57472
57473-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
57474+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
57475 asd_show_update_bios, asd_store_update_bios);
57476
57477 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
57478@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
57479 .lldd_control_phy = asd_control_phy,
57480 };
57481
57482-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
57483+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
57484 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
57485 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
57486 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
57487diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
57488index 58efd4b..cb48dc7 100644
57489--- a/drivers/scsi/bfa/bfa_ioc.h
57490+++ b/drivers/scsi/bfa/bfa_ioc.h
57491@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
57492 bfa_ioc_disable_cbfn_t disable_cbfn;
57493 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
57494 bfa_ioc_reset_cbfn_t reset_cbfn;
57495-};
57496+} __no_const;
57497
57498 /**
57499 * Heartbeat failure notification queue element.
57500diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
57501index 7ad177e..5503586 100644
57502--- a/drivers/scsi/bfa/bfa_iocfc.h
57503+++ b/drivers/scsi/bfa/bfa_iocfc.h
57504@@ -61,7 +61,7 @@ struct bfa_hwif_s {
57505 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
57506 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
57507 u32 *nvecs, u32 *maxvec);
57508-};
57509+} __no_const;
57510 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
57511
57512 struct bfa_iocfc_s {
57513diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
57514index 4967643..cbec06b 100644
57515--- a/drivers/scsi/dpt_i2o.c
57516+++ b/drivers/scsi/dpt_i2o.c
57517@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
57518 dma_addr_t addr;
57519 ulong flags = 0;
57520
57521+ pax_track_stack();
57522+
57523 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
57524 // get user msg size in u32s
57525 if(get_user(size, &user_msg[0])){
57526@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
57527 s32 rcode;
57528 dma_addr_t addr;
57529
57530+ pax_track_stack();
57531+
57532 memset(msg, 0 , sizeof(msg));
57533 len = scsi_bufflen(cmd);
57534 direction = 0x00000000;
57535diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
57536index c7076ce..e20c67c 100644
57537--- a/drivers/scsi/eata.c
57538+++ b/drivers/scsi/eata.c
57539@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
57540 struct hostdata *ha;
57541 char name[16];
57542
57543+ pax_track_stack();
57544+
57545 sprintf(name, "%s%d", driver_name, j);
57546
57547 if (!request_region(port_base, REGION_SIZE, driver_name)) {
57548diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
57549index 11ae5c9..891daec 100644
57550--- a/drivers/scsi/fcoe/libfcoe.c
57551+++ b/drivers/scsi/fcoe/libfcoe.c
57552@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
57553 size_t rlen;
57554 size_t dlen;
57555
57556+ pax_track_stack();
57557+
57558 fiph = (struct fip_header *)skb->data;
57559 sub = fiph->fip_subcode;
57560 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
57561diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
57562index 71c7bbe..e93088a 100644
57563--- a/drivers/scsi/fnic/fnic_main.c
57564+++ b/drivers/scsi/fnic/fnic_main.c
57565@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
57566 /* Start local port initiatialization */
57567
57568 lp->link_up = 0;
57569- lp->tt = fnic_transport_template;
57570+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
57571
57572 lp->max_retry_count = fnic->config.flogi_retries;
57573 lp->max_rport_retry_count = fnic->config.plogi_retries;
57574diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
57575index bb96d74..9ec3ce4 100644
57576--- a/drivers/scsi/gdth.c
57577+++ b/drivers/scsi/gdth.c
57578@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
57579 ulong flags;
57580 gdth_ha_str *ha;
57581
57582+ pax_track_stack();
57583+
57584 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
57585 return -EFAULT;
57586 ha = gdth_find_ha(ldrv.ionode);
57587@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
57588 gdth_ha_str *ha;
57589 int rval;
57590
57591+ pax_track_stack();
57592+
57593 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
57594 res.number >= MAX_HDRIVES)
57595 return -EFAULT;
57596@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
57597 gdth_ha_str *ha;
57598 int rval;
57599
57600+ pax_track_stack();
57601+
57602 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
57603 return -EFAULT;
57604 ha = gdth_find_ha(gen.ionode);
57605@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
57606 int i;
57607 gdth_cmd_str gdtcmd;
57608 char cmnd[MAX_COMMAND_SIZE];
57609+
57610+ pax_track_stack();
57611+
57612 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
57613
57614 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
57615diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
57616index 1258da3..20d8ae6 100644
57617--- a/drivers/scsi/gdth_proc.c
57618+++ b/drivers/scsi/gdth_proc.c
57619@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
57620 ulong64 paddr;
57621
57622 char cmnd[MAX_COMMAND_SIZE];
57623+
57624+ pax_track_stack();
57625+
57626 memset(cmnd, 0xff, 12);
57627 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
57628
57629@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
57630 gdth_hget_str *phg;
57631 char cmnd[MAX_COMMAND_SIZE];
57632
57633+ pax_track_stack();
57634+
57635 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
57636 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
57637 if (!gdtcmd || !estr)
57638diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
57639index d03a926..f324286 100644
57640--- a/drivers/scsi/hosts.c
57641+++ b/drivers/scsi/hosts.c
57642@@ -40,7 +40,7 @@
57643 #include "scsi_logging.h"
57644
57645
57646-static atomic_t scsi_host_next_hn; /* host_no for next new host */
57647+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
57648
57649
57650 static void scsi_host_cls_release(struct device *dev)
57651@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
57652 * subtract one because we increment first then return, but we need to
57653 * know what the next host number was before increment
57654 */
57655- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
57656+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
57657 shost->dma_channel = 0xff;
57658
57659 /* These three are default values which can be overridden */
57660diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
57661index a601159..55e19d2 100644
57662--- a/drivers/scsi/ipr.c
57663+++ b/drivers/scsi/ipr.c
57664@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
57665 return true;
57666 }
57667
57668-static struct ata_port_operations ipr_sata_ops = {
57669+static const struct ata_port_operations ipr_sata_ops = {
57670 .phy_reset = ipr_ata_phy_reset,
57671 .hardreset = ipr_sata_reset,
57672 .post_internal_cmd = ipr_ata_post_internal,
57673diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
57674index 4e49fbc..97907ff 100644
57675--- a/drivers/scsi/ips.h
57676+++ b/drivers/scsi/ips.h
57677@@ -1027,7 +1027,7 @@ typedef struct {
57678 int (*intr)(struct ips_ha *);
57679 void (*enableint)(struct ips_ha *);
57680 uint32_t (*statupd)(struct ips_ha *);
57681-} ips_hw_func_t;
57682+} __no_const ips_hw_func_t;
57683
57684 typedef struct ips_ha {
57685 uint8_t ha_id[IPS_MAX_CHANNELS+1];
57686diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
57687index c1c1574..a9c9348 100644
57688--- a/drivers/scsi/libfc/fc_exch.c
57689+++ b/drivers/scsi/libfc/fc_exch.c
57690@@ -86,12 +86,12 @@ struct fc_exch_mgr {
57691 * all together if not used XXX
57692 */
57693 struct {
57694- atomic_t no_free_exch;
57695- atomic_t no_free_exch_xid;
57696- atomic_t xid_not_found;
57697- atomic_t xid_busy;
57698- atomic_t seq_not_found;
57699- atomic_t non_bls_resp;
57700+ atomic_unchecked_t no_free_exch;
57701+ atomic_unchecked_t no_free_exch_xid;
57702+ atomic_unchecked_t xid_not_found;
57703+ atomic_unchecked_t xid_busy;
57704+ atomic_unchecked_t seq_not_found;
57705+ atomic_unchecked_t non_bls_resp;
57706 } stats;
57707 };
57708 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
57709@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
57710 /* allocate memory for exchange */
57711 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
57712 if (!ep) {
57713- atomic_inc(&mp->stats.no_free_exch);
57714+ atomic_inc_unchecked(&mp->stats.no_free_exch);
57715 goto out;
57716 }
57717 memset(ep, 0, sizeof(*ep));
57718@@ -557,7 +557,7 @@ out:
57719 return ep;
57720 err:
57721 spin_unlock_bh(&pool->lock);
57722- atomic_inc(&mp->stats.no_free_exch_xid);
57723+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
57724 mempool_free(ep, mp->ep_pool);
57725 return NULL;
57726 }
57727@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57728 xid = ntohs(fh->fh_ox_id); /* we originated exch */
57729 ep = fc_exch_find(mp, xid);
57730 if (!ep) {
57731- atomic_inc(&mp->stats.xid_not_found);
57732+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57733 reject = FC_RJT_OX_ID;
57734 goto out;
57735 }
57736@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57737 ep = fc_exch_find(mp, xid);
57738 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
57739 if (ep) {
57740- atomic_inc(&mp->stats.xid_busy);
57741+ atomic_inc_unchecked(&mp->stats.xid_busy);
57742 reject = FC_RJT_RX_ID;
57743 goto rel;
57744 }
57745@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57746 }
57747 xid = ep->xid; /* get our XID */
57748 } else if (!ep) {
57749- atomic_inc(&mp->stats.xid_not_found);
57750+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57751 reject = FC_RJT_RX_ID; /* XID not found */
57752 goto out;
57753 }
57754@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57755 } else {
57756 sp = &ep->seq;
57757 if (sp->id != fh->fh_seq_id) {
57758- atomic_inc(&mp->stats.seq_not_found);
57759+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57760 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
57761 goto rel;
57762 }
57763@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57764
57765 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
57766 if (!ep) {
57767- atomic_inc(&mp->stats.xid_not_found);
57768+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57769 goto out;
57770 }
57771 if (ep->esb_stat & ESB_ST_COMPLETE) {
57772- atomic_inc(&mp->stats.xid_not_found);
57773+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57774 goto out;
57775 }
57776 if (ep->rxid == FC_XID_UNKNOWN)
57777 ep->rxid = ntohs(fh->fh_rx_id);
57778 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
57779- atomic_inc(&mp->stats.xid_not_found);
57780+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57781 goto rel;
57782 }
57783 if (ep->did != ntoh24(fh->fh_s_id) &&
57784 ep->did != FC_FID_FLOGI) {
57785- atomic_inc(&mp->stats.xid_not_found);
57786+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57787 goto rel;
57788 }
57789 sof = fr_sof(fp);
57790@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57791 } else {
57792 sp = &ep->seq;
57793 if (sp->id != fh->fh_seq_id) {
57794- atomic_inc(&mp->stats.seq_not_found);
57795+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57796 goto rel;
57797 }
57798 }
57799@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57800 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
57801
57802 if (!sp)
57803- atomic_inc(&mp->stats.xid_not_found);
57804+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57805 else
57806- atomic_inc(&mp->stats.non_bls_resp);
57807+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
57808
57809 fc_frame_free(fp);
57810 }
57811diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
57812index 0ee989f..a582241 100644
57813--- a/drivers/scsi/libsas/sas_ata.c
57814+++ b/drivers/scsi/libsas/sas_ata.c
57815@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
57816 }
57817 }
57818
57819-static struct ata_port_operations sas_sata_ops = {
57820+static const struct ata_port_operations sas_sata_ops = {
57821 .phy_reset = sas_ata_phy_reset,
57822 .post_internal_cmd = sas_ata_post_internal,
57823 .qc_defer = ata_std_qc_defer,
57824diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
57825index aa10f79..5cc79e4 100644
57826--- a/drivers/scsi/lpfc/lpfc.h
57827+++ b/drivers/scsi/lpfc/lpfc.h
57828@@ -400,7 +400,7 @@ struct lpfc_vport {
57829 struct dentry *debug_nodelist;
57830 struct dentry *vport_debugfs_root;
57831 struct lpfc_debugfs_trc *disc_trc;
57832- atomic_t disc_trc_cnt;
57833+ atomic_unchecked_t disc_trc_cnt;
57834 #endif
57835 uint8_t stat_data_enabled;
57836 uint8_t stat_data_blocked;
57837@@ -725,8 +725,8 @@ struct lpfc_hba {
57838 struct timer_list fabric_block_timer;
57839 unsigned long bit_flags;
57840 #define FABRIC_COMANDS_BLOCKED 0
57841- atomic_t num_rsrc_err;
57842- atomic_t num_cmd_success;
57843+ atomic_unchecked_t num_rsrc_err;
57844+ atomic_unchecked_t num_cmd_success;
57845 unsigned long last_rsrc_error_time;
57846 unsigned long last_ramp_down_time;
57847 unsigned long last_ramp_up_time;
57848@@ -740,7 +740,7 @@ struct lpfc_hba {
57849 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
57850 struct dentry *debug_slow_ring_trc;
57851 struct lpfc_debugfs_trc *slow_ring_trc;
57852- atomic_t slow_ring_trc_cnt;
57853+ atomic_unchecked_t slow_ring_trc_cnt;
57854 #endif
57855
57856 /* Used for deferred freeing of ELS data buffers */
57857diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
57858index 8d0f0de..7c77a62 100644
57859--- a/drivers/scsi/lpfc/lpfc_debugfs.c
57860+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
57861@@ -124,7 +124,7 @@ struct lpfc_debug {
57862 int len;
57863 };
57864
57865-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57866+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57867 static unsigned long lpfc_debugfs_start_time = 0L;
57868
57869 /**
57870@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
57871 lpfc_debugfs_enable = 0;
57872
57873 len = 0;
57874- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
57875+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
57876 (lpfc_debugfs_max_disc_trc - 1);
57877 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
57878 dtp = vport->disc_trc + i;
57879@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
57880 lpfc_debugfs_enable = 0;
57881
57882 len = 0;
57883- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
57884+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
57885 (lpfc_debugfs_max_slow_ring_trc - 1);
57886 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
57887 dtp = phba->slow_ring_trc + i;
57888@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
57889 uint32_t *ptr;
57890 char buffer[1024];
57891
57892+ pax_track_stack();
57893+
57894 off = 0;
57895 spin_lock_irq(&phba->hbalock);
57896
57897@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
57898 !vport || !vport->disc_trc)
57899 return;
57900
57901- index = atomic_inc_return(&vport->disc_trc_cnt) &
57902+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
57903 (lpfc_debugfs_max_disc_trc - 1);
57904 dtp = vport->disc_trc + index;
57905 dtp->fmt = fmt;
57906 dtp->data1 = data1;
57907 dtp->data2 = data2;
57908 dtp->data3 = data3;
57909- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57910+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57911 dtp->jif = jiffies;
57912 #endif
57913 return;
57914@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
57915 !phba || !phba->slow_ring_trc)
57916 return;
57917
57918- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
57919+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
57920 (lpfc_debugfs_max_slow_ring_trc - 1);
57921 dtp = phba->slow_ring_trc + index;
57922 dtp->fmt = fmt;
57923 dtp->data1 = data1;
57924 dtp->data2 = data2;
57925 dtp->data3 = data3;
57926- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57927+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57928 dtp->jif = jiffies;
57929 #endif
57930 return;
57931@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57932 "slow_ring buffer\n");
57933 goto debug_failed;
57934 }
57935- atomic_set(&phba->slow_ring_trc_cnt, 0);
57936+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
57937 memset(phba->slow_ring_trc, 0,
57938 (sizeof(struct lpfc_debugfs_trc) *
57939 lpfc_debugfs_max_slow_ring_trc));
57940@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57941 "buffer\n");
57942 goto debug_failed;
57943 }
57944- atomic_set(&vport->disc_trc_cnt, 0);
57945+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
57946
57947 snprintf(name, sizeof(name), "discovery_trace");
57948 vport->debug_disc_trc =
57949diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
57950index 549bc7d..8189dbb 100644
57951--- a/drivers/scsi/lpfc/lpfc_init.c
57952+++ b/drivers/scsi/lpfc/lpfc_init.c
57953@@ -8021,8 +8021,10 @@ lpfc_init(void)
57954 printk(LPFC_COPYRIGHT "\n");
57955
57956 if (lpfc_enable_npiv) {
57957- lpfc_transport_functions.vport_create = lpfc_vport_create;
57958- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57959+ pax_open_kernel();
57960+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
57961+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57962+ pax_close_kernel();
57963 }
57964 lpfc_transport_template =
57965 fc_attach_transport(&lpfc_transport_functions);
57966diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
57967index c88f59f..ff2a42f 100644
57968--- a/drivers/scsi/lpfc/lpfc_scsi.c
57969+++ b/drivers/scsi/lpfc/lpfc_scsi.c
57970@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
57971 uint32_t evt_posted;
57972
57973 spin_lock_irqsave(&phba->hbalock, flags);
57974- atomic_inc(&phba->num_rsrc_err);
57975+ atomic_inc_unchecked(&phba->num_rsrc_err);
57976 phba->last_rsrc_error_time = jiffies;
57977
57978 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
57979@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
57980 unsigned long flags;
57981 struct lpfc_hba *phba = vport->phba;
57982 uint32_t evt_posted;
57983- atomic_inc(&phba->num_cmd_success);
57984+ atomic_inc_unchecked(&phba->num_cmd_success);
57985
57986 if (vport->cfg_lun_queue_depth <= queue_depth)
57987 return;
57988@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
57989 int i;
57990 struct lpfc_rport_data *rdata;
57991
57992- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
57993- num_cmd_success = atomic_read(&phba->num_cmd_success);
57994+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
57995+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
57996
57997 vports = lpfc_create_vport_work_array(phba);
57998 if (vports != NULL)
57999@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
58000 }
58001 }
58002 lpfc_destroy_vport_work_array(phba, vports);
58003- atomic_set(&phba->num_rsrc_err, 0);
58004- atomic_set(&phba->num_cmd_success, 0);
58005+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
58006+ atomic_set_unchecked(&phba->num_cmd_success, 0);
58007 }
58008
58009 /**
58010@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
58011 }
58012 }
58013 lpfc_destroy_vport_work_array(phba, vports);
58014- atomic_set(&phba->num_rsrc_err, 0);
58015- atomic_set(&phba->num_cmd_success, 0);
58016+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
58017+ atomic_set_unchecked(&phba->num_cmd_success, 0);
58018 }
58019
58020 /**
58021diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
58022index 234f0b7..3020aea 100644
58023--- a/drivers/scsi/megaraid/megaraid_mbox.c
58024+++ b/drivers/scsi/megaraid/megaraid_mbox.c
58025@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
58026 int rval;
58027 int i;
58028
58029+ pax_track_stack();
58030+
58031 // Allocate memory for the base list of scb for management module.
58032 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
58033
58034diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
58035index 7a117c1..ee01e9e 100644
58036--- a/drivers/scsi/osd/osd_initiator.c
58037+++ b/drivers/scsi/osd/osd_initiator.c
58038@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
58039 int nelem = ARRAY_SIZE(get_attrs), a = 0;
58040 int ret;
58041
58042+ pax_track_stack();
58043+
58044 or = osd_start_request(od, GFP_KERNEL);
58045 if (!or)
58046 return -ENOMEM;
58047diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
58048index 9ab8c86..9425ad3 100644
58049--- a/drivers/scsi/pmcraid.c
58050+++ b/drivers/scsi/pmcraid.c
58051@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
58052 res->scsi_dev = scsi_dev;
58053 scsi_dev->hostdata = res;
58054 res->change_detected = 0;
58055- atomic_set(&res->read_failures, 0);
58056- atomic_set(&res->write_failures, 0);
58057+ atomic_set_unchecked(&res->read_failures, 0);
58058+ atomic_set_unchecked(&res->write_failures, 0);
58059 rc = 0;
58060 }
58061 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
58062@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
58063
58064 /* If this was a SCSI read/write command keep count of errors */
58065 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
58066- atomic_inc(&res->read_failures);
58067+ atomic_inc_unchecked(&res->read_failures);
58068 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
58069- atomic_inc(&res->write_failures);
58070+ atomic_inc_unchecked(&res->write_failures);
58071
58072 if (!RES_IS_GSCSI(res->cfg_entry) &&
58073 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
58074@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
58075
58076 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
58077 /* add resources only after host is added into system */
58078- if (!atomic_read(&pinstance->expose_resources))
58079+ if (!atomic_read_unchecked(&pinstance->expose_resources))
58080 return;
58081
58082 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
58083@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
58084 init_waitqueue_head(&pinstance->reset_wait_q);
58085
58086 atomic_set(&pinstance->outstanding_cmds, 0);
58087- atomic_set(&pinstance->expose_resources, 0);
58088+ atomic_set_unchecked(&pinstance->expose_resources, 0);
58089
58090 INIT_LIST_HEAD(&pinstance->free_res_q);
58091 INIT_LIST_HEAD(&pinstance->used_res_q);
58092@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
58093 /* Schedule worker thread to handle CCN and take care of adding and
58094 * removing devices to OS
58095 */
58096- atomic_set(&pinstance->expose_resources, 1);
58097+ atomic_set_unchecked(&pinstance->expose_resources, 1);
58098 schedule_work(&pinstance->worker_q);
58099 return rc;
58100
58101diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
58102index 3441b3f..6cbe8f7 100644
58103--- a/drivers/scsi/pmcraid.h
58104+++ b/drivers/scsi/pmcraid.h
58105@@ -690,7 +690,7 @@ struct pmcraid_instance {
58106 atomic_t outstanding_cmds;
58107
58108 /* should add/delete resources to mid-layer now ?*/
58109- atomic_t expose_resources;
58110+ atomic_unchecked_t expose_resources;
58111
58112 /* Tasklet to handle deferred processing */
58113 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
58114@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
58115 struct list_head queue; /* link to "to be exposed" resources */
58116 struct pmcraid_config_table_entry cfg_entry;
58117 struct scsi_device *scsi_dev; /* Link scsi_device structure */
58118- atomic_t read_failures; /* count of failed READ commands */
58119- atomic_t write_failures; /* count of failed WRITE commands */
58120+ atomic_unchecked_t read_failures; /* count of failed READ commands */
58121+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
58122
58123 /* To indicate add/delete/modify during CCN */
58124 u8 change_detected;
58125diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
58126index 2150618..7034215 100644
58127--- a/drivers/scsi/qla2xxx/qla_def.h
58128+++ b/drivers/scsi/qla2xxx/qla_def.h
58129@@ -2089,7 +2089,7 @@ struct isp_operations {
58130
58131 int (*get_flash_version) (struct scsi_qla_host *, void *);
58132 int (*start_scsi) (srb_t *);
58133-};
58134+} __no_const;
58135
58136 /* MSI-X Support *************************************************************/
58137
58138diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
58139index 81b5f29..2ae1fad 100644
58140--- a/drivers/scsi/qla4xxx/ql4_def.h
58141+++ b/drivers/scsi/qla4xxx/ql4_def.h
58142@@ -240,7 +240,7 @@ struct ddb_entry {
58143 atomic_t retry_relogin_timer; /* Min Time between relogins
58144 * (4000 only) */
58145 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
58146- atomic_t relogin_retry_count; /* Num of times relogin has been
58147+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
58148 * retried */
58149
58150 uint16_t port;
58151diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
58152index af8c323..515dd51 100644
58153--- a/drivers/scsi/qla4xxx/ql4_init.c
58154+++ b/drivers/scsi/qla4xxx/ql4_init.c
58155@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
58156 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
58157 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
58158 atomic_set(&ddb_entry->relogin_timer, 0);
58159- atomic_set(&ddb_entry->relogin_retry_count, 0);
58160+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58161 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58162 list_add_tail(&ddb_entry->list, &ha->ddb_list);
58163 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
58164@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
58165 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58166 atomic_set(&ddb_entry->port_down_timer,
58167 ha->port_down_retry_count);
58168- atomic_set(&ddb_entry->relogin_retry_count, 0);
58169+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58170 atomic_set(&ddb_entry->relogin_timer, 0);
58171 clear_bit(DF_RELOGIN, &ddb_entry->flags);
58172 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
58173diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
58174index 83c8b5e..a82b348 100644
58175--- a/drivers/scsi/qla4xxx/ql4_os.c
58176+++ b/drivers/scsi/qla4xxx/ql4_os.c
58177@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
58178 ddb_entry->fw_ddb_device_state ==
58179 DDB_DS_SESSION_FAILED) {
58180 /* Reset retry relogin timer */
58181- atomic_inc(&ddb_entry->relogin_retry_count);
58182+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
58183 DEBUG2(printk("scsi%ld: index[%d] relogin"
58184 " timed out-retrying"
58185 " relogin (%d)\n",
58186 ha->host_no,
58187 ddb_entry->fw_ddb_index,
58188- atomic_read(&ddb_entry->
58189+ atomic_read_unchecked(&ddb_entry->
58190 relogin_retry_count))
58191 );
58192 start_dpc++;
58193diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
58194index dd098ca..686ce01 100644
58195--- a/drivers/scsi/scsi.c
58196+++ b/drivers/scsi/scsi.c
58197@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
58198 unsigned long timeout;
58199 int rtn = 0;
58200
58201- atomic_inc(&cmd->device->iorequest_cnt);
58202+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58203
58204 /* check if the device is still usable */
58205 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
58206diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
58207index bc3e363..e1a8e50 100644
58208--- a/drivers/scsi/scsi_debug.c
58209+++ b/drivers/scsi/scsi_debug.c
58210@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
58211 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
58212 unsigned char *cmd = (unsigned char *)scp->cmnd;
58213
58214+ pax_track_stack();
58215+
58216 if ((errsts = check_readiness(scp, 1, devip)))
58217 return errsts;
58218 memset(arr, 0, sizeof(arr));
58219@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
58220 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
58221 unsigned char *cmd = (unsigned char *)scp->cmnd;
58222
58223+ pax_track_stack();
58224+
58225 if ((errsts = check_readiness(scp, 1, devip)))
58226 return errsts;
58227 memset(arr, 0, sizeof(arr));
58228diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
58229index 8df12522..c4c1472 100644
58230--- a/drivers/scsi/scsi_lib.c
58231+++ b/drivers/scsi/scsi_lib.c
58232@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
58233 shost = sdev->host;
58234 scsi_init_cmd_errh(cmd);
58235 cmd->result = DID_NO_CONNECT << 16;
58236- atomic_inc(&cmd->device->iorequest_cnt);
58237+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58238
58239 /*
58240 * SCSI request completion path will do scsi_device_unbusy(),
58241@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
58242 */
58243 cmd->serial_number = 0;
58244
58245- atomic_inc(&cmd->device->iodone_cnt);
58246+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
58247 if (cmd->result)
58248- atomic_inc(&cmd->device->ioerr_cnt);
58249+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
58250
58251 disposition = scsi_decide_disposition(cmd);
58252 if (disposition != SUCCESS &&
58253diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
58254index 91a93e0..eae0fe3 100644
58255--- a/drivers/scsi/scsi_sysfs.c
58256+++ b/drivers/scsi/scsi_sysfs.c
58257@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
58258 char *buf) \
58259 { \
58260 struct scsi_device *sdev = to_scsi_device(dev); \
58261- unsigned long long count = atomic_read(&sdev->field); \
58262+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
58263 return snprintf(buf, 20, "0x%llx\n", count); \
58264 } \
58265 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
58266diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
58267index 1030327..f91fd30 100644
58268--- a/drivers/scsi/scsi_tgt_lib.c
58269+++ b/drivers/scsi/scsi_tgt_lib.c
58270@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
58271 int err;
58272
58273 dprintk("%lx %u\n", uaddr, len);
58274- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
58275+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
58276 if (err) {
58277 /*
58278 * TODO: need to fixup sg_tablesize, max_segment_size,
58279diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
58280index db02e31..1b42ea9 100644
58281--- a/drivers/scsi/scsi_transport_fc.c
58282+++ b/drivers/scsi/scsi_transport_fc.c
58283@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
58284 * Netlink Infrastructure
58285 */
58286
58287-static atomic_t fc_event_seq;
58288+static atomic_unchecked_t fc_event_seq;
58289
58290 /**
58291 * fc_get_event_number - Obtain the next sequential FC event number
58292@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
58293 u32
58294 fc_get_event_number(void)
58295 {
58296- return atomic_add_return(1, &fc_event_seq);
58297+ return atomic_add_return_unchecked(1, &fc_event_seq);
58298 }
58299 EXPORT_SYMBOL(fc_get_event_number);
58300
58301@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
58302 {
58303 int error;
58304
58305- atomic_set(&fc_event_seq, 0);
58306+ atomic_set_unchecked(&fc_event_seq, 0);
58307
58308 error = transport_class_register(&fc_host_class);
58309 if (error)
58310diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
58311index de2f8c4..63c5278 100644
58312--- a/drivers/scsi/scsi_transport_iscsi.c
58313+++ b/drivers/scsi/scsi_transport_iscsi.c
58314@@ -81,7 +81,7 @@ struct iscsi_internal {
58315 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
58316 };
58317
58318-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
58319+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
58320 static struct workqueue_struct *iscsi_eh_timer_workq;
58321
58322 /*
58323@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
58324 int err;
58325
58326 ihost = shost->shost_data;
58327- session->sid = atomic_add_return(1, &iscsi_session_nr);
58328+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
58329
58330 if (id == ISCSI_MAX_TARGET) {
58331 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
58332@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
58333 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
58334 ISCSI_TRANSPORT_VERSION);
58335
58336- atomic_set(&iscsi_session_nr, 0);
58337+ atomic_set_unchecked(&iscsi_session_nr, 0);
58338
58339 err = class_register(&iscsi_transport_class);
58340 if (err)
58341diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
58342index 21a045e..ec89e03 100644
58343--- a/drivers/scsi/scsi_transport_srp.c
58344+++ b/drivers/scsi/scsi_transport_srp.c
58345@@ -33,7 +33,7 @@
58346 #include "scsi_transport_srp_internal.h"
58347
58348 struct srp_host_attrs {
58349- atomic_t next_port_id;
58350+ atomic_unchecked_t next_port_id;
58351 };
58352 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
58353
58354@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
58355 struct Scsi_Host *shost = dev_to_shost(dev);
58356 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
58357
58358- atomic_set(&srp_host->next_port_id, 0);
58359+ atomic_set_unchecked(&srp_host->next_port_id, 0);
58360 return 0;
58361 }
58362
58363@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
58364 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
58365 rport->roles = ids->roles;
58366
58367- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
58368+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
58369 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
58370
58371 transport_setup_device(&rport->dev);
58372diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
58373index 040f751..98a5ed2 100644
58374--- a/drivers/scsi/sg.c
58375+++ b/drivers/scsi/sg.c
58376@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
58377 sdp->disk->disk_name,
58378 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
58379 NULL,
58380- (char *)arg);
58381+ (char __user *)arg);
58382 case BLKTRACESTART:
58383 return blk_trace_startstop(sdp->device->request_queue, 1);
58384 case BLKTRACESTOP:
58385@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
58386 const struct file_operations * fops;
58387 };
58388
58389-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
58390+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
58391 {"allow_dio", &adio_fops},
58392 {"debug", &debug_fops},
58393 {"def_reserved_size", &dressz_fops},
58394@@ -2307,7 +2307,7 @@ sg_proc_init(void)
58395 {
58396 int k, mask;
58397 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
58398- struct sg_proc_leaf * leaf;
58399+ const struct sg_proc_leaf * leaf;
58400
58401 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
58402 if (!sg_proc_sgp)
58403diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
58404index c19ca5e..3eb5959 100644
58405--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
58406+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
58407@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
58408 int do_iounmap = 0;
58409 int do_disable_device = 1;
58410
58411+ pax_track_stack();
58412+
58413 memset(&sym_dev, 0, sizeof(sym_dev));
58414 memset(&nvram, 0, sizeof(nvram));
58415 sym_dev.pdev = pdev;
58416diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
58417new file mode 100644
58418index 0000000..eabb432
58419--- /dev/null
58420+++ b/drivers/scsi/vmw_pvscsi.c
58421@@ -0,0 +1,1401 @@
58422+/*
58423+ * Linux driver for VMware's para-virtualized SCSI HBA.
58424+ *
58425+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
58426+ *
58427+ * This program is free software; you can redistribute it and/or modify it
58428+ * under the terms of the GNU General Public License as published by the
58429+ * Free Software Foundation; version 2 of the License and no later version.
58430+ *
58431+ * This program is distributed in the hope that it will be useful, but
58432+ * WITHOUT ANY WARRANTY; without even the implied warranty of
58433+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
58434+ * NON INFRINGEMENT. See the GNU General Public License for more
58435+ * details.
58436+ *
58437+ * You should have received a copy of the GNU General Public License
58438+ * along with this program; if not, write to the Free Software
58439+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
58440+ *
58441+ * Maintained by: Alok N Kataria <akataria@vmware.com>
58442+ *
58443+ */
58444+
58445+#include <linux/kernel.h>
58446+#include <linux/module.h>
58447+#include <linux/moduleparam.h>
58448+#include <linux/types.h>
58449+#include <linux/interrupt.h>
58450+#include <linux/workqueue.h>
58451+#include <linux/pci.h>
58452+
58453+#include <scsi/scsi.h>
58454+#include <scsi/scsi_host.h>
58455+#include <scsi/scsi_cmnd.h>
58456+#include <scsi/scsi_device.h>
58457+
58458+#include "vmw_pvscsi.h"
58459+
58460+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
58461+
58462+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
58463+MODULE_AUTHOR("VMware, Inc.");
58464+MODULE_LICENSE("GPL");
58465+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
58466+
58467+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
58468+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
58469+#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
58470+#define SGL_SIZE PAGE_SIZE
58471+
58472+#define pvscsi_dev(adapter) (&(adapter->dev->dev))
58473+
58474+struct pvscsi_sg_list {
58475+ struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
58476+};
58477+
58478+struct pvscsi_ctx {
58479+ /*
58480+ * The index of the context in cmd_map serves as the context ID for a
58481+ * 1-to-1 mapping completions back to requests.
58482+ */
58483+ struct scsi_cmnd *cmd;
58484+ struct pvscsi_sg_list *sgl;
58485+ struct list_head list;
58486+ dma_addr_t dataPA;
58487+ dma_addr_t sensePA;
58488+ dma_addr_t sglPA;
58489+};
58490+
58491+struct pvscsi_adapter {
58492+ char *mmioBase;
58493+ unsigned int irq;
58494+ u8 rev;
58495+ bool use_msi;
58496+ bool use_msix;
58497+ bool use_msg;
58498+
58499+ spinlock_t hw_lock;
58500+
58501+ struct workqueue_struct *workqueue;
58502+ struct work_struct work;
58503+
58504+ struct PVSCSIRingReqDesc *req_ring;
58505+ unsigned req_pages;
58506+ unsigned req_depth;
58507+ dma_addr_t reqRingPA;
58508+
58509+ struct PVSCSIRingCmpDesc *cmp_ring;
58510+ unsigned cmp_pages;
58511+ dma_addr_t cmpRingPA;
58512+
58513+ struct PVSCSIRingMsgDesc *msg_ring;
58514+ unsigned msg_pages;
58515+ dma_addr_t msgRingPA;
58516+
58517+ struct PVSCSIRingsState *rings_state;
58518+ dma_addr_t ringStatePA;
58519+
58520+ struct pci_dev *dev;
58521+ struct Scsi_Host *host;
58522+
58523+ struct list_head cmd_pool;
58524+ struct pvscsi_ctx *cmd_map;
58525+};
58526+
58527+
58528+/* Command line parameters */
58529+static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
58530+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
58531+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
58532+static bool pvscsi_disable_msi;
58533+static bool pvscsi_disable_msix;
58534+static bool pvscsi_use_msg = true;
58535+
58536+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
58537+
58538+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
58539+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
58540+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
58541+
58542+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
58543+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
58544+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
58545+
58546+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
58547+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
58548+ __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
58549+
58550+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
58551+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
58552+
58553+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
58554+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
58555+
58556+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
58557+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
58558+
58559+static const struct pci_device_id pvscsi_pci_tbl[] = {
58560+ { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
58561+ { 0 }
58562+};
58563+
58564+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
58565+
58566+static struct pvscsi_ctx *
58567+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58568+{
58569+ struct pvscsi_ctx *ctx, *end;
58570+
58571+ end = &adapter->cmd_map[adapter->req_depth];
58572+ for (ctx = adapter->cmd_map; ctx < end; ctx++)
58573+ if (ctx->cmd == cmd)
58574+ return ctx;
58575+
58576+ return NULL;
58577+}
58578+
58579+static struct pvscsi_ctx *
58580+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58581+{
58582+ struct pvscsi_ctx *ctx;
58583+
58584+ if (list_empty(&adapter->cmd_pool))
58585+ return NULL;
58586+
58587+ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
58588+ ctx->cmd = cmd;
58589+ list_del(&ctx->list);
58590+
58591+ return ctx;
58592+}
58593+
58594+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
58595+ struct pvscsi_ctx *ctx)
58596+{
58597+ ctx->cmd = NULL;
58598+ list_add(&ctx->list, &adapter->cmd_pool);
58599+}
58600+
58601+/*
58602+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
58603+ * non-zero integer. ctx always points to an entry in cmd_map array, hence
58604+ * the return value is always >=1.
58605+ */
58606+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
58607+ const struct pvscsi_ctx *ctx)
58608+{
58609+ return ctx - adapter->cmd_map + 1;
58610+}
58611+
58612+static struct pvscsi_ctx *
58613+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
58614+{
58615+ return &adapter->cmd_map[context - 1];
58616+}
58617+
58618+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
58619+ u32 offset, u32 val)
58620+{
58621+ writel(val, adapter->mmioBase + offset);
58622+}
58623+
58624+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
58625+{
58626+ return readl(adapter->mmioBase + offset);
58627+}
58628+
58629+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
58630+{
58631+ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
58632+}
58633+
58634+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
58635+ u32 val)
58636+{
58637+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
58638+}
58639+
58640+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
58641+{
58642+ u32 intr_bits;
58643+
58644+ intr_bits = PVSCSI_INTR_CMPL_MASK;
58645+ if (adapter->use_msg)
58646+ intr_bits |= PVSCSI_INTR_MSG_MASK;
58647+
58648+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
58649+}
58650+
58651+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
58652+{
58653+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
58654+}
58655+
58656+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
58657+ u32 cmd, const void *desc, size_t len)
58658+{
58659+ const u32 *ptr = desc;
58660+ size_t i;
58661+
58662+ len /= sizeof(*ptr);
58663+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
58664+ for (i = 0; i < len; i++)
58665+ pvscsi_reg_write(adapter,
58666+ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
58667+}
58668+
58669+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
58670+ const struct pvscsi_ctx *ctx)
58671+{
58672+ struct PVSCSICmdDescAbortCmd cmd = { 0 };
58673+
58674+ cmd.target = ctx->cmd->device->id;
58675+ cmd.context = pvscsi_map_context(adapter, ctx);
58676+
58677+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
58678+}
58679+
58680+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
58681+{
58682+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
58683+}
58684+
58685+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
58686+{
58687+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
58688+}
58689+
58690+static int scsi_is_rw(unsigned char op)
58691+{
58692+ return op == READ_6 || op == WRITE_6 ||
58693+ op == READ_10 || op == WRITE_10 ||
58694+ op == READ_12 || op == WRITE_12 ||
58695+ op == READ_16 || op == WRITE_16;
58696+}
58697+
58698+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
58699+ unsigned char op)
58700+{
58701+ if (scsi_is_rw(op))
58702+ pvscsi_kick_rw_io(adapter);
58703+ else
58704+ pvscsi_process_request_ring(adapter);
58705+}
58706+
58707+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
58708+{
58709+ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
58710+
58711+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
58712+}
58713+
58714+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
58715+{
58716+ dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
58717+
58718+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
58719+}
58720+
58721+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
58722+{
58723+ struct PVSCSICmdDescResetDevice cmd = { 0 };
58724+
58725+ dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
58726+
58727+ cmd.target = target;
58728+
58729+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
58730+ &cmd, sizeof(cmd));
58731+}
58732+
58733+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
58734+ struct scatterlist *sg, unsigned count)
58735+{
58736+ unsigned i;
58737+ struct PVSCSISGElement *sge;
58738+
58739+ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
58740+
58741+ sge = &ctx->sgl->sge[0];
58742+ for (i = 0; i < count; i++, sg++) {
58743+ sge[i].addr = sg_dma_address(sg);
58744+ sge[i].length = sg_dma_len(sg);
58745+ sge[i].flags = 0;
58746+ }
58747+}
58748+
58749+/*
58750+ * Map all data buffers for a command into PCI space and
58751+ * setup the scatter/gather list if needed.
58752+ */
58753+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
58754+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
58755+ struct PVSCSIRingReqDesc *e)
58756+{
58757+ unsigned count;
58758+ unsigned bufflen = scsi_bufflen(cmd);
58759+ struct scatterlist *sg;
58760+
58761+ e->dataLen = bufflen;
58762+ e->dataAddr = 0;
58763+ if (bufflen == 0)
58764+ return;
58765+
58766+ sg = scsi_sglist(cmd);
58767+ count = scsi_sg_count(cmd);
58768+ if (count != 0) {
58769+ int segs = scsi_dma_map(cmd);
58770+ if (segs > 1) {
58771+ pvscsi_create_sg(ctx, sg, segs);
58772+
58773+ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
58774+ ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
58775+ SGL_SIZE, PCI_DMA_TODEVICE);
58776+ e->dataAddr = ctx->sglPA;
58777+ } else
58778+ e->dataAddr = sg_dma_address(sg);
58779+ } else {
58780+ /*
58781+ * In case there is no S/G list, scsi_sglist points
58782+ * directly to the buffer.
58783+ */
58784+ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
58785+ cmd->sc_data_direction);
58786+ e->dataAddr = ctx->dataPA;
58787+ }
58788+}
58789+
58790+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
58791+ struct pvscsi_ctx *ctx)
58792+{
58793+ struct scsi_cmnd *cmd;
58794+ unsigned bufflen;
58795+
58796+ cmd = ctx->cmd;
58797+ bufflen = scsi_bufflen(cmd);
58798+
58799+ if (bufflen != 0) {
58800+ unsigned count = scsi_sg_count(cmd);
58801+
58802+ if (count != 0) {
58803+ scsi_dma_unmap(cmd);
58804+ if (ctx->sglPA) {
58805+ pci_unmap_single(adapter->dev, ctx->sglPA,
58806+ SGL_SIZE, PCI_DMA_TODEVICE);
58807+ ctx->sglPA = 0;
58808+ }
58809+ } else
58810+ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
58811+ cmd->sc_data_direction);
58812+ }
58813+ if (cmd->sense_buffer)
58814+ pci_unmap_single(adapter->dev, ctx->sensePA,
58815+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
58816+}
58817+
58818+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
58819+{
58820+ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
58821+ &adapter->ringStatePA);
58822+ if (!adapter->rings_state)
58823+ return -ENOMEM;
58824+
58825+ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
58826+ pvscsi_ring_pages);
58827+ adapter->req_depth = adapter->req_pages
58828+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
58829+ adapter->req_ring = pci_alloc_consistent(adapter->dev,
58830+ adapter->req_pages * PAGE_SIZE,
58831+ &adapter->reqRingPA);
58832+ if (!adapter->req_ring)
58833+ return -ENOMEM;
58834+
58835+ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
58836+ pvscsi_ring_pages);
58837+ adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
58838+ adapter->cmp_pages * PAGE_SIZE,
58839+ &adapter->cmpRingPA);
58840+ if (!adapter->cmp_ring)
58841+ return -ENOMEM;
58842+
58843+ BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
58844+ BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
58845+ BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
58846+
58847+ if (!adapter->use_msg)
58848+ return 0;
58849+
58850+ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
58851+ pvscsi_msg_ring_pages);
58852+ adapter->msg_ring = pci_alloc_consistent(adapter->dev,
58853+ adapter->msg_pages * PAGE_SIZE,
58854+ &adapter->msgRingPA);
58855+ if (!adapter->msg_ring)
58856+ return -ENOMEM;
58857+ BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
58858+
58859+ return 0;
58860+}
58861+
58862+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
58863+{
58864+ struct PVSCSICmdDescSetupRings cmd = { 0 };
58865+ dma_addr_t base;
58866+ unsigned i;
58867+
58868+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
58869+ cmd.reqRingNumPages = adapter->req_pages;
58870+ cmd.cmpRingNumPages = adapter->cmp_pages;
58871+
58872+ base = adapter->reqRingPA;
58873+ for (i = 0; i < adapter->req_pages; i++) {
58874+ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
58875+ base += PAGE_SIZE;
58876+ }
58877+
58878+ base = adapter->cmpRingPA;
58879+ for (i = 0; i < adapter->cmp_pages; i++) {
58880+ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
58881+ base += PAGE_SIZE;
58882+ }
58883+
58884+ memset(adapter->rings_state, 0, PAGE_SIZE);
58885+ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
58886+ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
58887+
58888+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
58889+ &cmd, sizeof(cmd));
58890+
58891+ if (adapter->use_msg) {
58892+ struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
58893+
58894+ cmd_msg.numPages = adapter->msg_pages;
58895+
58896+ base = adapter->msgRingPA;
58897+ for (i = 0; i < adapter->msg_pages; i++) {
58898+ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
58899+ base += PAGE_SIZE;
58900+ }
58901+ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
58902+
58903+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
58904+ &cmd_msg, sizeof(cmd_msg));
58905+ }
58906+}
58907+
58908+/*
58909+ * Pull a completion descriptor off and pass the completion back
58910+ * to the SCSI mid layer.
58911+ */
58912+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
58913+ const struct PVSCSIRingCmpDesc *e)
58914+{
58915+ struct pvscsi_ctx *ctx;
58916+ struct scsi_cmnd *cmd;
58917+ u32 btstat = e->hostStatus;
58918+ u32 sdstat = e->scsiStatus;
58919+
58920+ ctx = pvscsi_get_context(adapter, e->context);
58921+ cmd = ctx->cmd;
58922+ pvscsi_unmap_buffers(adapter, ctx);
58923+ pvscsi_release_context(adapter, ctx);
58924+ cmd->result = 0;
58925+
58926+ if (sdstat != SAM_STAT_GOOD &&
58927+ (btstat == BTSTAT_SUCCESS ||
58928+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
58929+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
58930+ cmd->result = (DID_OK << 16) | sdstat;
58931+ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
58932+ cmd->result |= (DRIVER_SENSE << 24);
58933+ } else
58934+ switch (btstat) {
58935+ case BTSTAT_SUCCESS:
58936+ case BTSTAT_LINKED_COMMAND_COMPLETED:
58937+ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
58938+ /* If everything went fine, let's move on.. */
58939+ cmd->result = (DID_OK << 16);
58940+ break;
58941+
58942+ case BTSTAT_DATARUN:
58943+ case BTSTAT_DATA_UNDERRUN:
58944+ /* Report residual data in underruns */
58945+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
58946+ cmd->result = (DID_ERROR << 16);
58947+ break;
58948+
58949+ case BTSTAT_SELTIMEO:
58950+ /* Our emulation returns this for non-connected devs */
58951+ cmd->result = (DID_BAD_TARGET << 16);
58952+ break;
58953+
58954+ case BTSTAT_LUNMISMATCH:
58955+ case BTSTAT_TAGREJECT:
58956+ case BTSTAT_BADMSG:
58957+ cmd->result = (DRIVER_INVALID << 24);
58958+ /* fall through */
58959+
58960+ case BTSTAT_HAHARDWARE:
58961+ case BTSTAT_INVPHASE:
58962+ case BTSTAT_HATIMEOUT:
58963+ case BTSTAT_NORESPONSE:
58964+ case BTSTAT_DISCONNECT:
58965+ case BTSTAT_HASOFTWARE:
58966+ case BTSTAT_BUSFREE:
58967+ case BTSTAT_SENSFAILED:
58968+ cmd->result |= (DID_ERROR << 16);
58969+ break;
58970+
58971+ case BTSTAT_SENTRST:
58972+ case BTSTAT_RECVRST:
58973+ case BTSTAT_BUSRESET:
58974+ cmd->result = (DID_RESET << 16);
58975+ break;
58976+
58977+ case BTSTAT_ABORTQUEUE:
58978+ cmd->result = (DID_ABORT << 16);
58979+ break;
58980+
58981+ case BTSTAT_SCSIPARITY:
58982+ cmd->result = (DID_PARITY << 16);
58983+ break;
58984+
58985+ default:
58986+ cmd->result = (DID_ERROR << 16);
58987+ scmd_printk(KERN_DEBUG, cmd,
58988+ "Unknown completion status: 0x%x\n",
58989+ btstat);
58990+ }
58991+
58992+ dev_dbg(&cmd->device->sdev_gendev,
58993+ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
58994+ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
58995+
58996+ cmd->scsi_done(cmd);
58997+}
58998+
58999+/*
59000+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
59001+ * where we may want to serialize some accesses between the driver and the
59002+ * emulation layer. We use compiler barriers instead of the more expensive
59003+ * memory barriers because PVSCSI is only supported on X86 which has strong
59004+ * memory access ordering.
59005+ */
59006+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
59007+{
59008+ struct PVSCSIRingsState *s = adapter->rings_state;
59009+ struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
59010+ u32 cmp_entries = s->cmpNumEntriesLog2;
59011+
59012+ while (s->cmpConsIdx != s->cmpProdIdx) {
59013+ struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
59014+ MASK(cmp_entries));
59015+ /*
59016+ * This barrier() ensures that *e is not dereferenced while
59017+ * the device emulation still writes data into the slot.
59018+ * Since the device emulation advances s->cmpProdIdx only after
59019+ * updating the slot we want to check it first.
59020+ */
59021+ barrier();
59022+ pvscsi_complete_request(adapter, e);
59023+ /*
59024+ * This barrier() ensures that compiler doesn't reorder write
59025+ * to s->cmpConsIdx before the read of (*e) inside
59026+ * pvscsi_complete_request. Otherwise, device emulation may
59027+ * overwrite *e before we had a chance to read it.
59028+ */
59029+ barrier();
59030+ s->cmpConsIdx++;
59031+ }
59032+}
59033+
59034+/*
59035+ * Translate a Linux SCSI request into a request ring entry.
59036+ */
59037+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
59038+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
59039+{
59040+ struct PVSCSIRingsState *s;
59041+ struct PVSCSIRingReqDesc *e;
59042+ struct scsi_device *sdev;
59043+ u32 req_entries;
59044+
59045+ s = adapter->rings_state;
59046+ sdev = cmd->device;
59047+ req_entries = s->reqNumEntriesLog2;
59048+
59049+ /*
59050+ * If this condition holds, we might have room on the request ring, but
59051+ * we might not have room on the completion ring for the response.
59052+ * However, we have already ruled out this possibility - we would not
59053+ * have successfully allocated a context if it were true, since we only
59054+ * have one context per request entry. Check for it anyway, since it
59055+ * would be a serious bug.
59056+ */
59057+ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
59058+ scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
59059+ "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
59060+ s->reqProdIdx, s->cmpConsIdx);
59061+ return -1;
59062+ }
59063+
59064+ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
59065+
59066+ e->bus = sdev->channel;
59067+ e->target = sdev->id;
59068+ memset(e->lun, 0, sizeof(e->lun));
59069+ e->lun[1] = sdev->lun;
59070+
59071+ if (cmd->sense_buffer) {
59072+ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
59073+ SCSI_SENSE_BUFFERSIZE,
59074+ PCI_DMA_FROMDEVICE);
59075+ e->senseAddr = ctx->sensePA;
59076+ e->senseLen = SCSI_SENSE_BUFFERSIZE;
59077+ } else {
59078+ e->senseLen = 0;
59079+ e->senseAddr = 0;
59080+ }
59081+ e->cdbLen = cmd->cmd_len;
59082+ e->vcpuHint = smp_processor_id();
59083+ memcpy(e->cdb, cmd->cmnd, e->cdbLen);
59084+
59085+ e->tag = SIMPLE_QUEUE_TAG;
59086+ if (sdev->tagged_supported &&
59087+ (cmd->tag == HEAD_OF_QUEUE_TAG ||
59088+ cmd->tag == ORDERED_QUEUE_TAG))
59089+ e->tag = cmd->tag;
59090+
59091+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
59092+ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
59093+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
59094+ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
59095+ else if (cmd->sc_data_direction == DMA_NONE)
59096+ e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
59097+ else
59098+ e->flags = 0;
59099+
59100+ pvscsi_map_buffers(adapter, ctx, cmd, e);
59101+
59102+ e->context = pvscsi_map_context(adapter, ctx);
59103+
59104+ barrier();
59105+
59106+ s->reqProdIdx++;
59107+
59108+ return 0;
59109+}
59110+
59111+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
59112+{
59113+ struct Scsi_Host *host = cmd->device->host;
59114+ struct pvscsi_adapter *adapter = shost_priv(host);
59115+ struct pvscsi_ctx *ctx;
59116+ unsigned long flags;
59117+
59118+ spin_lock_irqsave(&adapter->hw_lock, flags);
59119+
59120+ ctx = pvscsi_acquire_context(adapter, cmd);
59121+ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
59122+ if (ctx)
59123+ pvscsi_release_context(adapter, ctx);
59124+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59125+ return SCSI_MLQUEUE_HOST_BUSY;
59126+ }
59127+
59128+ cmd->scsi_done = done;
59129+
59130+ dev_dbg(&cmd->device->sdev_gendev,
59131+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
59132+
59133+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59134+
59135+ pvscsi_kick_io(adapter, cmd->cmnd[0]);
59136+
59137+ return 0;
59138+}
59139+
59140+static int pvscsi_abort(struct scsi_cmnd *cmd)
59141+{
59142+ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
59143+ struct pvscsi_ctx *ctx;
59144+ unsigned long flags;
59145+
59146+ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
59147+ adapter->host->host_no, cmd);
59148+
59149+ spin_lock_irqsave(&adapter->hw_lock, flags);
59150+
59151+ /*
59152+ * Poll the completion ring first - we might be trying to abort
59153+ * a command that is waiting to be dispatched in the completion ring.
59154+ */
59155+ pvscsi_process_completion_ring(adapter);
59156+
59157+ /*
59158+ * If there is no context for the command, it either already succeeded
59159+ * or else was never properly issued. Not our problem.
59160+ */
59161+ ctx = pvscsi_find_context(adapter, cmd);
59162+ if (!ctx) {
59163+ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
59164+ goto out;
59165+ }
59166+
59167+ pvscsi_abort_cmd(adapter, ctx);
59168+
59169+ pvscsi_process_completion_ring(adapter);
59170+
59171+out:
59172+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59173+ return SUCCESS;
59174+}
59175+
59176+/*
59177+ * Abort all outstanding requests. This is only safe to use if the completion
59178+ * ring will never be walked again or the device has been reset, because it
59179+ * destroys the 1-1 mapping between context field passed to emulation and our
59180+ * request structure.
59181+ */
59182+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
59183+{
59184+ unsigned i;
59185+
59186+ for (i = 0; i < adapter->req_depth; i++) {
59187+ struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
59188+ struct scsi_cmnd *cmd = ctx->cmd;
59189+ if (cmd) {
59190+ scmd_printk(KERN_ERR, cmd,
59191+ "Forced reset on cmd %p\n", cmd);
59192+ pvscsi_unmap_buffers(adapter, ctx);
59193+ pvscsi_release_context(adapter, ctx);
59194+ cmd->result = (DID_RESET << 16);
59195+ cmd->scsi_done(cmd);
59196+ }
59197+ }
59198+}
59199+
59200+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
59201+{
59202+ struct Scsi_Host *host = cmd->device->host;
59203+ struct pvscsi_adapter *adapter = shost_priv(host);
59204+ unsigned long flags;
59205+ bool use_msg;
59206+
59207+ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
59208+
59209+ spin_lock_irqsave(&adapter->hw_lock, flags);
59210+
59211+ use_msg = adapter->use_msg;
59212+
59213+ if (use_msg) {
59214+ adapter->use_msg = 0;
59215+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59216+
59217+ /*
59218+ * Now that we know that the ISR won't add more work on the
59219+ * workqueue we can safely flush any outstanding work.
59220+ */
59221+ flush_workqueue(adapter->workqueue);
59222+ spin_lock_irqsave(&adapter->hw_lock, flags);
59223+ }
59224+
59225+ /*
59226+ * We're going to tear down the entire ring structure and set it back
59227+ * up, so stalling new requests until all completions are flushed and
59228+ * the rings are back in place.
59229+ */
59230+
59231+ pvscsi_process_request_ring(adapter);
59232+
59233+ ll_adapter_reset(adapter);
59234+
59235+ /*
59236+ * Now process any completions. Note we do this AFTER adapter reset,
59237+ * which is strange, but stops races where completions get posted
59238+ * between processing the ring and issuing the reset. The backend will
59239+ * not touch the ring memory after reset, so the immediately pre-reset
59240+ * completion ring state is still valid.
59241+ */
59242+ pvscsi_process_completion_ring(adapter);
59243+
59244+ pvscsi_reset_all(adapter);
59245+ adapter->use_msg = use_msg;
59246+ pvscsi_setup_all_rings(adapter);
59247+ pvscsi_unmask_intr(adapter);
59248+
59249+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59250+
59251+ return SUCCESS;
59252+}
59253+
59254+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
59255+{
59256+ struct Scsi_Host *host = cmd->device->host;
59257+ struct pvscsi_adapter *adapter = shost_priv(host);
59258+ unsigned long flags;
59259+
59260+ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
59261+
59262+ /*
59263+ * We don't want to queue new requests for this bus after
59264+ * flushing all pending requests to emulation, since new
59265+ * requests could then sneak in during this bus reset phase,
59266+ * so take the lock now.
59267+ */
59268+ spin_lock_irqsave(&adapter->hw_lock, flags);
59269+
59270+ pvscsi_process_request_ring(adapter);
59271+ ll_bus_reset(adapter);
59272+ pvscsi_process_completion_ring(adapter);
59273+
59274+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59275+
59276+ return SUCCESS;
59277+}
59278+
59279+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
59280+{
59281+ struct Scsi_Host *host = cmd->device->host;
59282+ struct pvscsi_adapter *adapter = shost_priv(host);
59283+ unsigned long flags;
59284+
59285+ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
59286+ host->host_no, cmd->device->id);
59287+
59288+ /*
59289+ * We don't want to queue new requests for this device after flushing
59290+ * all pending requests to emulation, since new requests could then
59291+ * sneak in during this device reset phase, so take the lock now.
59292+ */
59293+ spin_lock_irqsave(&adapter->hw_lock, flags);
59294+
59295+ pvscsi_process_request_ring(adapter);
59296+ ll_device_reset(adapter, cmd->device->id);
59297+ pvscsi_process_completion_ring(adapter);
59298+
59299+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59300+
59301+ return SUCCESS;
59302+}
59303+
59304+static struct scsi_host_template pvscsi_template;
59305+
59306+static const char *pvscsi_info(struct Scsi_Host *host)
59307+{
59308+ struct pvscsi_adapter *adapter = shost_priv(host);
59309+ static char buf[256];
59310+
59311+ sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
59312+ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
59313+ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
59314+ pvscsi_template.cmd_per_lun);
59315+
59316+ return buf;
59317+}
59318+
59319+static struct scsi_host_template pvscsi_template = {
59320+ .module = THIS_MODULE,
59321+ .name = "VMware PVSCSI Host Adapter",
59322+ .proc_name = "vmw_pvscsi",
59323+ .info = pvscsi_info,
59324+ .queuecommand = pvscsi_queue,
59325+ .this_id = -1,
59326+ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
59327+ .dma_boundary = UINT_MAX,
59328+ .max_sectors = 0xffff,
59329+ .use_clustering = ENABLE_CLUSTERING,
59330+ .eh_abort_handler = pvscsi_abort,
59331+ .eh_device_reset_handler = pvscsi_device_reset,
59332+ .eh_bus_reset_handler = pvscsi_bus_reset,
59333+ .eh_host_reset_handler = pvscsi_host_reset,
59334+};
59335+
59336+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
59337+ const struct PVSCSIRingMsgDesc *e)
59338+{
59339+ struct PVSCSIRingsState *s = adapter->rings_state;
59340+ struct Scsi_Host *host = adapter->host;
59341+ struct scsi_device *sdev;
59342+
59343+ printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
59344+ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
59345+
59346+ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
59347+
59348+ if (e->type == PVSCSI_MSG_DEV_ADDED) {
59349+ struct PVSCSIMsgDescDevStatusChanged *desc;
59350+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59351+
59352+ printk(KERN_INFO
59353+ "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
59354+ desc->bus, desc->target, desc->lun[1]);
59355+
59356+ if (!scsi_host_get(host))
59357+ return;
59358+
59359+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
59360+ desc->lun[1]);
59361+ if (sdev) {
59362+ printk(KERN_INFO "vmw_pvscsi: device already exists\n");
59363+ scsi_device_put(sdev);
59364+ } else
59365+ scsi_add_device(adapter->host, desc->bus,
59366+ desc->target, desc->lun[1]);
59367+
59368+ scsi_host_put(host);
59369+ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
59370+ struct PVSCSIMsgDescDevStatusChanged *desc;
59371+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59372+
59373+ printk(KERN_INFO
59374+ "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
59375+ desc->bus, desc->target, desc->lun[1]);
59376+
59377+ if (!scsi_host_get(host))
59378+ return;
59379+
59380+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
59381+ desc->lun[1]);
59382+ if (sdev) {
59383+ scsi_remove_device(sdev);
59384+ scsi_device_put(sdev);
59385+ } else
59386+ printk(KERN_INFO
59387+ "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
59388+ desc->bus, desc->target, desc->lun[1]);
59389+
59390+ scsi_host_put(host);
59391+ }
59392+}
59393+
59394+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
59395+{
59396+ struct PVSCSIRingsState *s = adapter->rings_state;
59397+
59398+ return s->msgProdIdx != s->msgConsIdx;
59399+}
59400+
59401+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
59402+{
59403+ struct PVSCSIRingsState *s = adapter->rings_state;
59404+ struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
59405+ u32 msg_entries = s->msgNumEntriesLog2;
59406+
59407+ while (pvscsi_msg_pending(adapter)) {
59408+ struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
59409+ MASK(msg_entries));
59410+
59411+ barrier();
59412+ pvscsi_process_msg(adapter, e);
59413+ barrier();
59414+ s->msgConsIdx++;
59415+ }
59416+}
59417+
59418+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
59419+{
59420+ struct pvscsi_adapter *adapter;
59421+
59422+ adapter = container_of(data, struct pvscsi_adapter, work);
59423+
59424+ pvscsi_process_msg_ring(adapter);
59425+}
59426+
59427+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
59428+{
59429+ char name[32];
59430+
59431+ if (!pvscsi_use_msg)
59432+ return 0;
59433+
59434+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
59435+ PVSCSI_CMD_SETUP_MSG_RING);
59436+
59437+ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
59438+ return 0;
59439+
59440+ snprintf(name, sizeof(name),
59441+ "vmw_pvscsi_wq_%u", adapter->host->host_no);
59442+
59443+ adapter->workqueue = create_singlethread_workqueue(name);
59444+ if (!adapter->workqueue) {
59445+ printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
59446+ return 0;
59447+ }
59448+ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
59449+
59450+ return 1;
59451+}
59452+
59453+static irqreturn_t pvscsi_isr(int irq, void *devp)
59454+{
59455+ struct pvscsi_adapter *adapter = devp;
59456+ int handled;
59457+
59458+ if (adapter->use_msi || adapter->use_msix)
59459+ handled = true;
59460+ else {
59461+ u32 val = pvscsi_read_intr_status(adapter);
59462+ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
59463+ if (handled)
59464+ pvscsi_write_intr_status(devp, val);
59465+ }
59466+
59467+ if (handled) {
59468+ unsigned long flags;
59469+
59470+ spin_lock_irqsave(&adapter->hw_lock, flags);
59471+
59472+ pvscsi_process_completion_ring(adapter);
59473+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
59474+ queue_work(adapter->workqueue, &adapter->work);
59475+
59476+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59477+ }
59478+
59479+ return IRQ_RETVAL(handled);
59480+}
59481+
59482+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
59483+{
59484+ struct pvscsi_ctx *ctx = adapter->cmd_map;
59485+ unsigned i;
59486+
59487+ for (i = 0; i < adapter->req_depth; ++i, ++ctx)
59488+ kfree(ctx->sgl);
59489+}
59490+
59491+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
59492+{
59493+ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
59494+ int ret;
59495+
59496+ ret = pci_enable_msix(adapter->dev, &entry, 1);
59497+ if (ret)
59498+ return ret;
59499+
59500+ *irq = entry.vector;
59501+
59502+ return 0;
59503+}
59504+
59505+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
59506+{
59507+ if (adapter->irq) {
59508+ free_irq(adapter->irq, adapter);
59509+ adapter->irq = 0;
59510+ }
59511+ if (adapter->use_msi) {
59512+ pci_disable_msi(adapter->dev);
59513+ adapter->use_msi = 0;
59514+ } else if (adapter->use_msix) {
59515+ pci_disable_msix(adapter->dev);
59516+ adapter->use_msix = 0;
59517+ }
59518+}
59519+
59520+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
59521+{
59522+ pvscsi_shutdown_intr(adapter);
59523+
59524+ if (adapter->workqueue)
59525+ destroy_workqueue(adapter->workqueue);
59526+
59527+ if (adapter->mmioBase)
59528+ pci_iounmap(adapter->dev, adapter->mmioBase);
59529+
59530+ pci_release_regions(adapter->dev);
59531+
59532+ if (adapter->cmd_map) {
59533+ pvscsi_free_sgls(adapter);
59534+ kfree(adapter->cmd_map);
59535+ }
59536+
59537+ if (adapter->rings_state)
59538+ pci_free_consistent(adapter->dev, PAGE_SIZE,
59539+ adapter->rings_state, adapter->ringStatePA);
59540+
59541+ if (adapter->req_ring)
59542+ pci_free_consistent(adapter->dev,
59543+ adapter->req_pages * PAGE_SIZE,
59544+ adapter->req_ring, adapter->reqRingPA);
59545+
59546+ if (adapter->cmp_ring)
59547+ pci_free_consistent(adapter->dev,
59548+ adapter->cmp_pages * PAGE_SIZE,
59549+ adapter->cmp_ring, adapter->cmpRingPA);
59550+
59551+ if (adapter->msg_ring)
59552+ pci_free_consistent(adapter->dev,
59553+ adapter->msg_pages * PAGE_SIZE,
59554+ adapter->msg_ring, adapter->msgRingPA);
59555+}
59556+
59557+/*
59558+ * Allocate scatter gather lists.
59559+ *
59560+ * These are statically allocated. Trying to be clever was not worth it.
59561+ *
59562+ * Dynamic allocation can fail, and we can't go deeep into the memory
59563+ * allocator, since we're a SCSI driver, and trying too hard to allocate
59564+ * memory might generate disk I/O. We also don't want to fail disk I/O
59565+ * in that case because we can't get an allocation - the I/O could be
59566+ * trying to swap out data to free memory. Since that is pathological,
59567+ * just use a statically allocated scatter list.
59568+ *
59569+ */
59570+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
59571+{
59572+ struct pvscsi_ctx *ctx;
59573+ int i;
59574+
59575+ ctx = adapter->cmd_map;
59576+ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
59577+
59578+ for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
59579+ ctx->sgl = kmalloc(SGL_SIZE, GFP_KERNEL);
59580+ ctx->sglPA = 0;
59581+ BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
59582+ if (!ctx->sgl) {
59583+ for (; i >= 0; --i, --ctx) {
59584+ kfree(ctx->sgl);
59585+ ctx->sgl = NULL;
59586+ }
59587+ return -ENOMEM;
59588+ }
59589+ }
59590+
59591+ return 0;
59592+}
59593+
59594+static int __devinit pvscsi_probe(struct pci_dev *pdev,
59595+ const struct pci_device_id *id)
59596+{
59597+ struct pvscsi_adapter *adapter;
59598+ struct Scsi_Host *host;
59599+ unsigned int i;
59600+ int error;
59601+
59602+ error = -ENODEV;
59603+
59604+ if (pci_enable_device(pdev))
59605+ return error;
59606+
59607+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
59608+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
59609+ printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
59610+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
59611+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
59612+ printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
59613+ } else {
59614+ printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
59615+ goto out_disable_device;
59616+ }
59617+
59618+ pvscsi_template.can_queue =
59619+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
59620+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
59621+ pvscsi_template.cmd_per_lun =
59622+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
59623+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
59624+ if (!host) {
59625+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
59626+ goto out_disable_device;
59627+ }
59628+
59629+ adapter = shost_priv(host);
59630+ memset(adapter, 0, sizeof(*adapter));
59631+ adapter->dev = pdev;
59632+ adapter->host = host;
59633+
59634+ spin_lock_init(&adapter->hw_lock);
59635+
59636+ host->max_channel = 0;
59637+ host->max_id = 16;
59638+ host->max_lun = 1;
59639+ host->max_cmd_len = 16;
59640+
59641+ adapter->rev = pdev->revision;
59642+
59643+ if (pci_request_regions(pdev, "vmw_pvscsi")) {
59644+ printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
59645+ goto out_free_host;
59646+ }
59647+
59648+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
59649+ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
59650+ continue;
59651+
59652+ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
59653+ continue;
59654+
59655+ break;
59656+ }
59657+
59658+ if (i == DEVICE_COUNT_RESOURCE) {
59659+ printk(KERN_ERR
59660+ "vmw_pvscsi: adapter has no suitable MMIO region\n");
59661+ goto out_release_resources;
59662+ }
59663+
59664+ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
59665+
59666+ if (!adapter->mmioBase) {
59667+ printk(KERN_ERR
59668+ "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
59669+ i, PVSCSI_MEM_SPACE_SIZE);
59670+ goto out_release_resources;
59671+ }
59672+
59673+ pci_set_master(pdev);
59674+ pci_set_drvdata(pdev, host);
59675+
59676+ ll_adapter_reset(adapter);
59677+
59678+ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
59679+
59680+ error = pvscsi_allocate_rings(adapter);
59681+ if (error) {
59682+ printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
59683+ goto out_release_resources;
59684+ }
59685+
59686+ /*
59687+ * From this point on we should reset the adapter if anything goes
59688+ * wrong.
59689+ */
59690+ pvscsi_setup_all_rings(adapter);
59691+
59692+ adapter->cmd_map = kcalloc(adapter->req_depth,
59693+ sizeof(struct pvscsi_ctx), GFP_KERNEL);
59694+ if (!adapter->cmd_map) {
59695+ printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
59696+ error = -ENOMEM;
59697+ goto out_reset_adapter;
59698+ }
59699+
59700+ INIT_LIST_HEAD(&adapter->cmd_pool);
59701+ for (i = 0; i < adapter->req_depth; i++) {
59702+ struct pvscsi_ctx *ctx = adapter->cmd_map + i;
59703+ list_add(&ctx->list, &adapter->cmd_pool);
59704+ }
59705+
59706+ error = pvscsi_allocate_sg(adapter);
59707+ if (error) {
59708+ printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
59709+ goto out_reset_adapter;
59710+ }
59711+
59712+ if (!pvscsi_disable_msix &&
59713+ pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
59714+ printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
59715+ adapter->use_msix = 1;
59716+ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
59717+ printk(KERN_INFO "vmw_pvscsi: using MSI\n");
59718+ adapter->use_msi = 1;
59719+ adapter->irq = pdev->irq;
59720+ } else {
59721+ printk(KERN_INFO "vmw_pvscsi: using INTx\n");
59722+ adapter->irq = pdev->irq;
59723+ }
59724+
59725+ error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
59726+ "vmw_pvscsi", adapter);
59727+ if (error) {
59728+ printk(KERN_ERR
59729+ "vmw_pvscsi: unable to request IRQ: %d\n", error);
59730+ adapter->irq = 0;
59731+ goto out_reset_adapter;
59732+ }
59733+
59734+ error = scsi_add_host(host, &pdev->dev);
59735+ if (error) {
59736+ printk(KERN_ERR
59737+ "vmw_pvscsi: scsi_add_host failed: %d\n", error);
59738+ goto out_reset_adapter;
59739+ }
59740+
59741+ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
59742+ adapter->rev, host->host_no);
59743+
59744+ pvscsi_unmask_intr(adapter);
59745+
59746+ scsi_scan_host(host);
59747+
59748+ return 0;
59749+
59750+out_reset_adapter:
59751+ ll_adapter_reset(adapter);
59752+out_release_resources:
59753+ pvscsi_release_resources(adapter);
59754+out_free_host:
59755+ scsi_host_put(host);
59756+out_disable_device:
59757+ pci_set_drvdata(pdev, NULL);
59758+ pci_disable_device(pdev);
59759+
59760+ return error;
59761+}
59762+
59763+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
59764+{
59765+ pvscsi_mask_intr(adapter);
59766+
59767+ if (adapter->workqueue)
59768+ flush_workqueue(adapter->workqueue);
59769+
59770+ pvscsi_shutdown_intr(adapter);
59771+
59772+ pvscsi_process_request_ring(adapter);
59773+ pvscsi_process_completion_ring(adapter);
59774+ ll_adapter_reset(adapter);
59775+}
59776+
59777+static void pvscsi_shutdown(struct pci_dev *dev)
59778+{
59779+ struct Scsi_Host *host = pci_get_drvdata(dev);
59780+ struct pvscsi_adapter *adapter = shost_priv(host);
59781+
59782+ __pvscsi_shutdown(adapter);
59783+}
59784+
59785+static void pvscsi_remove(struct pci_dev *pdev)
59786+{
59787+ struct Scsi_Host *host = pci_get_drvdata(pdev);
59788+ struct pvscsi_adapter *adapter = shost_priv(host);
59789+
59790+ scsi_remove_host(host);
59791+
59792+ __pvscsi_shutdown(adapter);
59793+ pvscsi_release_resources(adapter);
59794+
59795+ scsi_host_put(host);
59796+
59797+ pci_set_drvdata(pdev, NULL);
59798+ pci_disable_device(pdev);
59799+}
59800+
59801+static struct pci_driver pvscsi_pci_driver = {
59802+ .name = "vmw_pvscsi",
59803+ .id_table = pvscsi_pci_tbl,
59804+ .probe = pvscsi_probe,
59805+ .remove = __devexit_p(pvscsi_remove),
59806+ .shutdown = pvscsi_shutdown,
59807+};
59808+
59809+static int __init pvscsi_init(void)
59810+{
59811+ pr_info("%s - version %s\n",
59812+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
59813+ return pci_register_driver(&pvscsi_pci_driver);
59814+}
59815+
59816+static void __exit pvscsi_exit(void)
59817+{
59818+ pci_unregister_driver(&pvscsi_pci_driver);
59819+}
59820+
59821+module_init(pvscsi_init);
59822+module_exit(pvscsi_exit);
59823diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
59824new file mode 100644
59825index 0000000..62e36e7
59826--- /dev/null
59827+++ b/drivers/scsi/vmw_pvscsi.h
59828@@ -0,0 +1,397 @@
59829+/*
59830+ * VMware PVSCSI header file
59831+ *
59832+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
59833+ *
59834+ * This program is free software; you can redistribute it and/or modify it
59835+ * under the terms of the GNU General Public License as published by the
59836+ * Free Software Foundation; version 2 of the License and no later version.
59837+ *
59838+ * This program is distributed in the hope that it will be useful, but
59839+ * WITHOUT ANY WARRANTY; without even the implied warranty of
59840+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
59841+ * NON INFRINGEMENT. See the GNU General Public License for more
59842+ * details.
59843+ *
59844+ * You should have received a copy of the GNU General Public License
59845+ * along with this program; if not, write to the Free Software
59846+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
59847+ *
59848+ * Maintained by: Alok N Kataria <akataria@vmware.com>
59849+ *
59850+ */
59851+
59852+#ifndef _VMW_PVSCSI_H_
59853+#define _VMW_PVSCSI_H_
59854+
59855+#include <linux/types.h>
59856+
59857+#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
59858+
59859+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
59860+
59861+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
59862+
59863+#define PCI_VENDOR_ID_VMWARE 0x15AD
59864+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
59865+
59866+/*
59867+ * host adapter status/error codes
59868+ */
59869+enum HostBusAdapterStatus {
59870+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
59871+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
59872+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
59873+ BTSTAT_DATA_UNDERRUN = 0x0c,
59874+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
59875+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
59876+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
59877+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
59878+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
59879+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
59880+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
59881+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
59882+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
59883+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
59884+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
59885+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
59886+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
59887+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
59888+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
59889+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
59890+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
59891+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
59892+};
59893+
59894+/*
59895+ * Register offsets.
59896+ *
59897+ * These registers are accessible both via i/o space and mm i/o.
59898+ */
59899+
59900+enum PVSCSIRegOffset {
59901+ PVSCSI_REG_OFFSET_COMMAND = 0x0,
59902+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
59903+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
59904+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
59905+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
59906+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
59907+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
59908+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
59909+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
59910+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
59911+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
59912+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
59913+};
59914+
59915+/*
59916+ * Virtual h/w commands.
59917+ */
59918+
59919+enum PVSCSICommands {
59920+ PVSCSI_CMD_FIRST = 0, /* has to be first */
59921+
59922+ PVSCSI_CMD_ADAPTER_RESET = 1,
59923+ PVSCSI_CMD_ISSUE_SCSI = 2,
59924+ PVSCSI_CMD_SETUP_RINGS = 3,
59925+ PVSCSI_CMD_RESET_BUS = 4,
59926+ PVSCSI_CMD_RESET_DEVICE = 5,
59927+ PVSCSI_CMD_ABORT_CMD = 6,
59928+ PVSCSI_CMD_CONFIG = 7,
59929+ PVSCSI_CMD_SETUP_MSG_RING = 8,
59930+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
59931+
59932+ PVSCSI_CMD_LAST = 10 /* has to be last */
59933+};
59934+
59935+/*
59936+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
59937+ */
59938+
59939+struct PVSCSICmdDescResetDevice {
59940+ u32 target;
59941+ u8 lun[8];
59942+} __packed;
59943+
59944+/*
59945+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
59946+ *
59947+ * - currently does not support specifying the LUN.
59948+ * - _pad should be 0.
59949+ */
59950+
59951+struct PVSCSICmdDescAbortCmd {
59952+ u64 context;
59953+ u32 target;
59954+ u32 _pad;
59955+} __packed;
59956+
59957+/*
59958+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
59959+ *
59960+ * Notes:
59961+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
59962+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
59963+ * - reqRingNumPages and cmpRingNumPages need to be inferior to
59964+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
59965+ */
59966+
59967+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
59968+struct PVSCSICmdDescSetupRings {
59969+ u32 reqRingNumPages;
59970+ u32 cmpRingNumPages;
59971+ u64 ringsStatePPN;
59972+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59973+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59974+} __packed;
59975+
59976+/*
59977+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
59978+ *
59979+ * Notes:
59980+ * - this command was not supported in the initial revision of the h/w
59981+ * interface. Before using it, you need to check that it is supported by
59982+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
59983+ * immediately after read the 'command status' register:
59984+ * * a value of -1 means that the cmd is NOT supported,
59985+ * * a value != -1 means that the cmd IS supported.
59986+ * If it's supported the 'command status' register should return:
59987+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
59988+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
59989+ * RingsState page is already setup. If not, the command is a nop.
59990+ * - numPages needs to be a power of two,
59991+ * - numPages needs to be different from 0,
59992+ * - _pad should be zero.
59993+ */
59994+
59995+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
59996+
59997+struct PVSCSICmdDescSetupMsgRing {
59998+ u32 numPages;
59999+ u32 _pad;
60000+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
60001+} __packed;
60002+
60003+enum PVSCSIMsgType {
60004+ PVSCSI_MSG_DEV_ADDED = 0,
60005+ PVSCSI_MSG_DEV_REMOVED = 1,
60006+ PVSCSI_MSG_LAST = 2,
60007+};
60008+
60009+/*
60010+ * Msg descriptor.
60011+ *
60012+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
60013+ *
60014+ * - type is of type enum PVSCSIMsgType.
60015+ * - the content of args depend on the type of event being delivered.
60016+ */
60017+
60018+struct PVSCSIRingMsgDesc {
60019+ u32 type;
60020+ u32 args[31];
60021+} __packed;
60022+
60023+struct PVSCSIMsgDescDevStatusChanged {
60024+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
60025+ u32 bus;
60026+ u32 target;
60027+ u8 lun[8];
60028+ u32 pad[27];
60029+} __packed;
60030+
60031+/*
60032+ * Rings state.
60033+ *
60034+ * - the fields:
60035+ * . msgProdIdx,
60036+ * . msgConsIdx,
60037+ * . msgNumEntriesLog2,
60038+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
60039+ * - '_pad' helps to ensure that the msg related fields are on their own
60040+ * cache-line.
60041+ */
60042+
60043+struct PVSCSIRingsState {
60044+ u32 reqProdIdx;
60045+ u32 reqConsIdx;
60046+ u32 reqNumEntriesLog2;
60047+
60048+ u32 cmpProdIdx;
60049+ u32 cmpConsIdx;
60050+ u32 cmpNumEntriesLog2;
60051+
60052+ u8 _pad[104];
60053+
60054+ u32 msgProdIdx;
60055+ u32 msgConsIdx;
60056+ u32 msgNumEntriesLog2;
60057+} __packed;
60058+
60059+/*
60060+ * Request descriptor.
60061+ *
60062+ * sizeof(RingReqDesc) = 128
60063+ *
60064+ * - context: is a unique identifier of a command. It could normally be any
60065+ * 64bit value, however we currently store it in the serialNumber variable
60066+ * of struct SCSI_Command, so we have the following restrictions due to the
60067+ * way this field is handled in the vmkernel storage stack:
60068+ * * this value can't be 0,
60069+ * * the upper 32bit need to be 0 since serialNumber is as a u32.
60070+ * Currently tracked as PR 292060.
60071+ * - dataLen: contains the total number of bytes that need to be transferred.
60072+ * - dataAddr:
60073+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
60074+ * s/g table segment, each s/g segment is entirely contained on a single
60075+ * page of physical memory,
60076+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
60077+ * the buffer used for the DMA transfer,
60078+ * - flags:
60079+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
60080+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
60081+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
60082+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
60083+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
60084+ * 16bytes. To be specified.
60085+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
60086+ * completion of the i/o. For guest OSes that use lowest priority message
60087+ * delivery mode (such as windows), we use this "hint" to deliver the
60088+ * completion action to the proper vcpu. For now, we can use the vcpuId of
60089+ * the processor that initiated the i/o as a likely candidate for the vcpu
60090+ * that will be waiting for the completion..
60091+ * - bus should be 0: we currently only support bus 0 for now.
60092+ * - unused should be zero'd.
60093+ */
60094+
60095+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
60096+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
60097+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
60098+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
60099+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
60100+
60101+struct PVSCSIRingReqDesc {
60102+ u64 context;
60103+ u64 dataAddr;
60104+ u64 dataLen;
60105+ u64 senseAddr;
60106+ u32 senseLen;
60107+ u32 flags;
60108+ u8 cdb[16];
60109+ u8 cdbLen;
60110+ u8 lun[8];
60111+ u8 tag;
60112+ u8 bus;
60113+ u8 target;
60114+ u8 vcpuHint;
60115+ u8 unused[59];
60116+} __packed;
60117+
60118+/*
60119+ * Scatter-gather list management.
60120+ *
60121+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
60122+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
60123+ * table segment.
60124+ *
60125+ * - each segment of the s/g table contain a succession of struct
60126+ * PVSCSISGElement.
60127+ * - each segment is entirely contained on a single physical page of memory.
60128+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
60129+ * PVSCSISGElement.flags and in this case:
60130+ * * addr is the PA of the next s/g segment,
60131+ * * length is undefined, assumed to be 0.
60132+ */
60133+
60134+struct PVSCSISGElement {
60135+ u64 addr;
60136+ u32 length;
60137+ u32 flags;
60138+} __packed;
60139+
60140+/*
60141+ * Completion descriptor.
60142+ *
60143+ * sizeof(RingCmpDesc) = 32
60144+ *
60145+ * - context: identifier of the command. The same thing that was specified
60146+ * under "context" as part of struct RingReqDesc at initiation time,
60147+ * - dataLen: number of bytes transferred for the actual i/o operation,
60148+ * - senseLen: number of bytes written into the sense buffer,
60149+ * - hostStatus: adapter status,
60150+ * - scsiStatus: device status,
60151+ * - _pad should be zero.
60152+ */
60153+
60154+struct PVSCSIRingCmpDesc {
60155+ u64 context;
60156+ u64 dataLen;
60157+ u32 senseLen;
60158+ u16 hostStatus;
60159+ u16 scsiStatus;
60160+ u32 _pad[2];
60161+} __packed;
60162+
60163+/*
60164+ * Interrupt status / IRQ bits.
60165+ */
60166+
60167+#define PVSCSI_INTR_CMPL_0 (1 << 0)
60168+#define PVSCSI_INTR_CMPL_1 (1 << 1)
60169+#define PVSCSI_INTR_CMPL_MASK MASK(2)
60170+
60171+#define PVSCSI_INTR_MSG_0 (1 << 2)
60172+#define PVSCSI_INTR_MSG_1 (1 << 3)
60173+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
60174+
60175+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
60176+
60177+/*
60178+ * Number of MSI-X vectors supported.
60179+ */
60180+#define PVSCSI_MAX_INTRS 24
60181+
60182+/*
60183+ * Enumeration of supported MSI-X vectors
60184+ */
60185+#define PVSCSI_VECTOR_COMPLETION 0
60186+
60187+/*
60188+ * Misc constants for the rings.
60189+ */
60190+
60191+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60192+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60193+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
60194+
60195+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
60196+ (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
60197+
60198+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
60199+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
60200+
60201+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
60202+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
60203+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
60204+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
60205+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
60206+
60207+enum PVSCSIMemSpace {
60208+ PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
60209+ PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
60210+ PVSCSI_MEM_SPACE_MISC_PAGE = 2,
60211+ PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
60212+ PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
60213+ PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
60214+};
60215+
60216+#define PVSCSI_MEM_SPACE_NUM_PAGES \
60217+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
60218+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
60219+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
60220+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
60221+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
60222+
60223+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
60224+
60225+#endif /* _VMW_PVSCSI_H_ */
60226diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
60227index eadc1ab..2d81457 100644
60228--- a/drivers/serial/kgdboc.c
60229+++ b/drivers/serial/kgdboc.c
60230@@ -18,7 +18,7 @@
60231
60232 #define MAX_CONFIG_LEN 40
60233
60234-static struct kgdb_io kgdboc_io_ops;
60235+static const struct kgdb_io kgdboc_io_ops;
60236
60237 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
60238 static int configured = -1;
60239@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
60240 module_put(THIS_MODULE);
60241 }
60242
60243-static struct kgdb_io kgdboc_io_ops = {
60244+static const struct kgdb_io kgdboc_io_ops = {
60245 .name = "kgdboc",
60246 .read_char = kgdboc_get_char,
60247 .write_char = kgdboc_put_char,
60248diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
60249index b76f246..7f41af7 100644
60250--- a/drivers/spi/spi.c
60251+++ b/drivers/spi/spi.c
60252@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
60253 EXPORT_SYMBOL_GPL(spi_sync);
60254
60255 /* portable code must never pass more than 32 bytes */
60256-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
60257+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
60258
60259 static u8 *buf;
60260
60261diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
60262index b9b37ff..19dfa23 100644
60263--- a/drivers/staging/android/binder.c
60264+++ b/drivers/staging/android/binder.c
60265@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
60266 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
60267 }
60268
60269-static struct vm_operations_struct binder_vm_ops = {
60270+static const struct vm_operations_struct binder_vm_ops = {
60271 .open = binder_vma_open,
60272 .close = binder_vma_close,
60273 };
60274diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
60275index cda26bb..39fed3f 100644
60276--- a/drivers/staging/b3dfg/b3dfg.c
60277+++ b/drivers/staging/b3dfg/b3dfg.c
60278@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
60279 return VM_FAULT_NOPAGE;
60280 }
60281
60282-static struct vm_operations_struct b3dfg_vm_ops = {
60283+static const struct vm_operations_struct b3dfg_vm_ops = {
60284 .fault = b3dfg_vma_fault,
60285 };
60286
60287@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
60288 return r;
60289 }
60290
60291-static struct file_operations b3dfg_fops = {
60292+static const struct file_operations b3dfg_fops = {
60293 .owner = THIS_MODULE,
60294 .open = b3dfg_open,
60295 .release = b3dfg_release,
60296diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
60297index 908f25a..c9a579b 100644
60298--- a/drivers/staging/comedi/comedi_fops.c
60299+++ b/drivers/staging/comedi/comedi_fops.c
60300@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
60301 mutex_unlock(&dev->mutex);
60302 }
60303
60304-static struct vm_operations_struct comedi_vm_ops = {
60305+static const struct vm_operations_struct comedi_vm_ops = {
60306 .close = comedi_unmap,
60307 };
60308
60309diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
60310index e55a0db..577b776 100644
60311--- a/drivers/staging/dream/qdsp5/adsp_driver.c
60312+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
60313@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
60314 static dev_t adsp_devno;
60315 static struct class *adsp_class;
60316
60317-static struct file_operations adsp_fops = {
60318+static const struct file_operations adsp_fops = {
60319 .owner = THIS_MODULE,
60320 .open = adsp_open,
60321 .unlocked_ioctl = adsp_ioctl,
60322diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
60323index ad2390f..4116ee8 100644
60324--- a/drivers/staging/dream/qdsp5/audio_aac.c
60325+++ b/drivers/staging/dream/qdsp5/audio_aac.c
60326@@ -1022,7 +1022,7 @@ done:
60327 return rc;
60328 }
60329
60330-static struct file_operations audio_aac_fops = {
60331+static const struct file_operations audio_aac_fops = {
60332 .owner = THIS_MODULE,
60333 .open = audio_open,
60334 .release = audio_release,
60335diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
60336index cd818a5..870b37b 100644
60337--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
60338+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
60339@@ -833,7 +833,7 @@ done:
60340 return rc;
60341 }
60342
60343-static struct file_operations audio_amrnb_fops = {
60344+static const struct file_operations audio_amrnb_fops = {
60345 .owner = THIS_MODULE,
60346 .open = audamrnb_open,
60347 .release = audamrnb_release,
60348diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
60349index 4b43e18..cedafda 100644
60350--- a/drivers/staging/dream/qdsp5/audio_evrc.c
60351+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
60352@@ -805,7 +805,7 @@ dma_fail:
60353 return rc;
60354 }
60355
60356-static struct file_operations audio_evrc_fops = {
60357+static const struct file_operations audio_evrc_fops = {
60358 .owner = THIS_MODULE,
60359 .open = audevrc_open,
60360 .release = audevrc_release,
60361diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
60362index 3d950a2..9431118 100644
60363--- a/drivers/staging/dream/qdsp5/audio_in.c
60364+++ b/drivers/staging/dream/qdsp5/audio_in.c
60365@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
60366 return 0;
60367 }
60368
60369-static struct file_operations audio_fops = {
60370+static const struct file_operations audio_fops = {
60371 .owner = THIS_MODULE,
60372 .open = audio_in_open,
60373 .release = audio_in_release,
60374@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
60375 .unlocked_ioctl = audio_in_ioctl,
60376 };
60377
60378-static struct file_operations audpre_fops = {
60379+static const struct file_operations audpre_fops = {
60380 .owner = THIS_MODULE,
60381 .open = audpre_open,
60382 .unlocked_ioctl = audpre_ioctl,
60383diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
60384index b95574f..286c2f4 100644
60385--- a/drivers/staging/dream/qdsp5/audio_mp3.c
60386+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
60387@@ -941,7 +941,7 @@ done:
60388 return rc;
60389 }
60390
60391-static struct file_operations audio_mp3_fops = {
60392+static const struct file_operations audio_mp3_fops = {
60393 .owner = THIS_MODULE,
60394 .open = audio_open,
60395 .release = audio_release,
60396diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
60397index d1adcf6..f8f9833 100644
60398--- a/drivers/staging/dream/qdsp5/audio_out.c
60399+++ b/drivers/staging/dream/qdsp5/audio_out.c
60400@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
60401 return 0;
60402 }
60403
60404-static struct file_operations audio_fops = {
60405+static const struct file_operations audio_fops = {
60406 .owner = THIS_MODULE,
60407 .open = audio_open,
60408 .release = audio_release,
60409@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
60410 .unlocked_ioctl = audio_ioctl,
60411 };
60412
60413-static struct file_operations audpp_fops = {
60414+static const struct file_operations audpp_fops = {
60415 .owner = THIS_MODULE,
60416 .open = audpp_open,
60417 .unlocked_ioctl = audpp_ioctl,
60418diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
60419index f0f50e3..f6b9dbc 100644
60420--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
60421+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
60422@@ -816,7 +816,7 @@ err:
60423 return rc;
60424 }
60425
60426-static struct file_operations audio_qcelp_fops = {
60427+static const struct file_operations audio_qcelp_fops = {
60428 .owner = THIS_MODULE,
60429 .open = audqcelp_open,
60430 .release = audqcelp_release,
60431diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
60432index 037d7ff..5469ec3 100644
60433--- a/drivers/staging/dream/qdsp5/snd.c
60434+++ b/drivers/staging/dream/qdsp5/snd.c
60435@@ -242,7 +242,7 @@ err:
60436 return rc;
60437 }
60438
60439-static struct file_operations snd_fops = {
60440+static const struct file_operations snd_fops = {
60441 .owner = THIS_MODULE,
60442 .open = snd_open,
60443 .release = snd_release,
60444diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
60445index d4e7d88..0ea632a 100644
60446--- a/drivers/staging/dream/smd/smd_qmi.c
60447+++ b/drivers/staging/dream/smd/smd_qmi.c
60448@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
60449 return 0;
60450 }
60451
60452-static struct file_operations qmi_fops = {
60453+static const struct file_operations qmi_fops = {
60454 .owner = THIS_MODULE,
60455 .read = qmi_read,
60456 .write = qmi_write,
60457diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60458index cd3910b..ff053d3 100644
60459--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
60460+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60461@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
60462 return rc;
60463 }
60464
60465-static struct file_operations rpcrouter_server_fops = {
60466+static const struct file_operations rpcrouter_server_fops = {
60467 .owner = THIS_MODULE,
60468 .open = rpcrouter_open,
60469 .release = rpcrouter_release,
60470@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
60471 .unlocked_ioctl = rpcrouter_ioctl,
60472 };
60473
60474-static struct file_operations rpcrouter_router_fops = {
60475+static const struct file_operations rpcrouter_router_fops = {
60476 .owner = THIS_MODULE,
60477 .open = rpcrouter_open,
60478 .release = rpcrouter_release,
60479diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
60480index c24e4e0..07665be 100644
60481--- a/drivers/staging/dst/dcore.c
60482+++ b/drivers/staging/dst/dcore.c
60483@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
60484 return 0;
60485 }
60486
60487-static struct block_device_operations dst_blk_ops = {
60488+static const struct block_device_operations dst_blk_ops = {
60489 .open = dst_bdev_open,
60490 .release = dst_bdev_release,
60491 .owner = THIS_MODULE,
60492@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
60493 n->size = ctl->size;
60494
60495 atomic_set(&n->refcnt, 1);
60496- atomic_long_set(&n->gen, 0);
60497+ atomic_long_set_unchecked(&n->gen, 0);
60498 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
60499
60500 err = dst_node_sysfs_init(n);
60501diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
60502index 557d372..8d84422 100644
60503--- a/drivers/staging/dst/trans.c
60504+++ b/drivers/staging/dst/trans.c
60505@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
60506 t->error = 0;
60507 t->retries = 0;
60508 atomic_set(&t->refcnt, 1);
60509- t->gen = atomic_long_inc_return(&n->gen);
60510+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
60511
60512 t->enc = bio_data_dir(bio);
60513 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
60514diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
60515index 94f7752..d051514 100644
60516--- a/drivers/staging/et131x/et1310_tx.c
60517+++ b/drivers/staging/et131x/et1310_tx.c
60518@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
60519 struct net_device_stats *stats = &etdev->net_stats;
60520
60521 if (pMpTcb->Flags & fMP_DEST_BROAD)
60522- atomic_inc(&etdev->Stats.brdcstxmt);
60523+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
60524 else if (pMpTcb->Flags & fMP_DEST_MULTI)
60525- atomic_inc(&etdev->Stats.multixmt);
60526+ atomic_inc_unchecked(&etdev->Stats.multixmt);
60527 else
60528- atomic_inc(&etdev->Stats.unixmt);
60529+ atomic_inc_unchecked(&etdev->Stats.unixmt);
60530
60531 if (pMpTcb->Packet) {
60532 stats->tx_bytes += pMpTcb->Packet->len;
60533diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
60534index 1dfe06f..f469b4d 100644
60535--- a/drivers/staging/et131x/et131x_adapter.h
60536+++ b/drivers/staging/et131x/et131x_adapter.h
60537@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
60538 * operations
60539 */
60540 u32 unircv; /* # multicast packets received */
60541- atomic_t unixmt; /* # multicast packets for Tx */
60542+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
60543 u32 multircv; /* # multicast packets received */
60544- atomic_t multixmt; /* # multicast packets for Tx */
60545+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
60546 u32 brdcstrcv; /* # broadcast packets received */
60547- atomic_t brdcstxmt; /* # broadcast packets for Tx */
60548+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
60549 u32 norcvbuf; /* # Rx packets discarded */
60550 u32 noxmtbuf; /* # Tx packets discarded */
60551
60552diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
60553index 4bd353a..e28f455 100644
60554--- a/drivers/staging/go7007/go7007-v4l2.c
60555+++ b/drivers/staging/go7007/go7007-v4l2.c
60556@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60557 return 0;
60558 }
60559
60560-static struct vm_operations_struct go7007_vm_ops = {
60561+static const struct vm_operations_struct go7007_vm_ops = {
60562 .open = go7007_vm_open,
60563 .close = go7007_vm_close,
60564 .fault = go7007_vm_fault,
60565diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
60566index 366dc95..b974d87 100644
60567--- a/drivers/staging/hv/Channel.c
60568+++ b/drivers/staging/hv/Channel.c
60569@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
60570
60571 DPRINT_ENTER(VMBUS);
60572
60573- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
60574- atomic_inc(&gVmbusConnection.NextGpadlHandle);
60575+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
60576+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
60577
60578 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
60579 ASSERT(msgInfo != NULL);
60580diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
60581index b12237f..01ae28a 100644
60582--- a/drivers/staging/hv/Hv.c
60583+++ b/drivers/staging/hv/Hv.c
60584@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
60585 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
60586 u32 outputAddressHi = outputAddress >> 32;
60587 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
60588- volatile void *hypercallPage = gHvContext.HypercallPage;
60589+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
60590
60591 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
60592 Control, Input, Output);
60593diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
60594index d089bb1..2ebc158 100644
60595--- a/drivers/staging/hv/VmbusApi.h
60596+++ b/drivers/staging/hv/VmbusApi.h
60597@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
60598 u32 *GpadlHandle);
60599 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
60600 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
60601-};
60602+} __no_const;
60603
60604 /* Base driver object */
60605 struct hv_driver {
60606diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
60607index 5a37cce..6ecc88c 100644
60608--- a/drivers/staging/hv/VmbusPrivate.h
60609+++ b/drivers/staging/hv/VmbusPrivate.h
60610@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
60611 struct VMBUS_CONNECTION {
60612 enum VMBUS_CONNECT_STATE ConnectState;
60613
60614- atomic_t NextGpadlHandle;
60615+ atomic_unchecked_t NextGpadlHandle;
60616
60617 /*
60618 * Represents channel interrupts. Each bit position represents a
60619diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
60620index 871a202..ca50ddf 100644
60621--- a/drivers/staging/hv/blkvsc_drv.c
60622+++ b/drivers/staging/hv/blkvsc_drv.c
60623@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
60624 /* The one and only one */
60625 static struct blkvsc_driver_context g_blkvsc_drv;
60626
60627-static struct block_device_operations block_ops = {
60628+static const struct block_device_operations block_ops = {
60629 .owner = THIS_MODULE,
60630 .open = blkvsc_open,
60631 .release = blkvsc_release,
60632diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
60633index 6acc49a..fbc8d46 100644
60634--- a/drivers/staging/hv/vmbus_drv.c
60635+++ b/drivers/staging/hv/vmbus_drv.c
60636@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60637 to_device_context(root_device_obj);
60638 struct device_context *child_device_ctx =
60639 to_device_context(child_device_obj);
60640- static atomic_t device_num = ATOMIC_INIT(0);
60641+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
60642
60643 DPRINT_ENTER(VMBUS_DRV);
60644
60645@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60646
60647 /* Set the device name. Otherwise, device_register() will fail. */
60648 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
60649- atomic_inc_return(&device_num));
60650+ atomic_inc_return_unchecked(&device_num));
60651
60652 /* The new device belongs to this bus */
60653 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
60654diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
60655index d926189..17b19fd 100644
60656--- a/drivers/staging/iio/ring_generic.h
60657+++ b/drivers/staging/iio/ring_generic.h
60658@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
60659
60660 int (*is_enabled)(struct iio_ring_buffer *ring);
60661 int (*enable)(struct iio_ring_buffer *ring);
60662-};
60663+} __no_const;
60664
60665 /**
60666 * struct iio_ring_buffer - general ring buffer structure
60667diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
60668index 1b237b7..88c624e 100644
60669--- a/drivers/staging/octeon/ethernet-rx.c
60670+++ b/drivers/staging/octeon/ethernet-rx.c
60671@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60672 /* Increment RX stats for virtual ports */
60673 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
60674 #ifdef CONFIG_64BIT
60675- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
60676- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
60677+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
60678+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
60679 #else
60680- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
60681- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
60682+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
60683+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
60684 #endif
60685 }
60686 netif_receive_skb(skb);
60687@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60688 dev->name);
60689 */
60690 #ifdef CONFIG_64BIT
60691- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
60692+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
60693 #else
60694- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
60695+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
60696 #endif
60697 dev_kfree_skb_irq(skb);
60698 }
60699diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
60700index 492c502..d9909f1 100644
60701--- a/drivers/staging/octeon/ethernet.c
60702+++ b/drivers/staging/octeon/ethernet.c
60703@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
60704 * since the RX tasklet also increments it.
60705 */
60706 #ifdef CONFIG_64BIT
60707- atomic64_add(rx_status.dropped_packets,
60708- (atomic64_t *)&priv->stats.rx_dropped);
60709+ atomic64_add_unchecked(rx_status.dropped_packets,
60710+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
60711 #else
60712- atomic_add(rx_status.dropped_packets,
60713- (atomic_t *)&priv->stats.rx_dropped);
60714+ atomic_add_unchecked(rx_status.dropped_packets,
60715+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
60716 #endif
60717 }
60718
60719diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
60720index a35bd5d..28fff45 100644
60721--- a/drivers/staging/otus/80211core/pub_zfi.h
60722+++ b/drivers/staging/otus/80211core/pub_zfi.h
60723@@ -531,7 +531,7 @@ struct zsCbFuncTbl
60724 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
60725
60726 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
60727-};
60728+} __no_const;
60729
60730 extern void zfZeroMemory(u8_t* va, u16_t length);
60731 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
60732diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
60733index c39a25f..696f5aa 100644
60734--- a/drivers/staging/panel/panel.c
60735+++ b/drivers/staging/panel/panel.c
60736@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
60737 return 0;
60738 }
60739
60740-static struct file_operations lcd_fops = {
60741+static const struct file_operations lcd_fops = {
60742 .write = lcd_write,
60743 .open = lcd_open,
60744 .release = lcd_release,
60745@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
60746 return 0;
60747 }
60748
60749-static struct file_operations keypad_fops = {
60750+static const struct file_operations keypad_fops = {
60751 .read = keypad_read, /* read */
60752 .open = keypad_open, /* open */
60753 .release = keypad_release, /* close */
60754diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
60755index 270ebcb..37e46af 100644
60756--- a/drivers/staging/phison/phison.c
60757+++ b/drivers/staging/phison/phison.c
60758@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
60759 ATA_BMDMA_SHT(DRV_NAME),
60760 };
60761
60762-static struct ata_port_operations phison_ops = {
60763+static const struct ata_port_operations phison_ops = {
60764 .inherits = &ata_bmdma_port_ops,
60765 .prereset = phison_pre_reset,
60766 };
60767diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
60768index 2eb8e3d..57616a7 100644
60769--- a/drivers/staging/poch/poch.c
60770+++ b/drivers/staging/poch/poch.c
60771@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
60772 return 0;
60773 }
60774
60775-static struct file_operations poch_fops = {
60776+static const struct file_operations poch_fops = {
60777 .owner = THIS_MODULE,
60778 .open = poch_open,
60779 .release = poch_release,
60780diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
60781index c94de31..19402bc 100644
60782--- a/drivers/staging/pohmelfs/inode.c
60783+++ b/drivers/staging/pohmelfs/inode.c
60784@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60785 mutex_init(&psb->mcache_lock);
60786 psb->mcache_root = RB_ROOT;
60787 psb->mcache_timeout = msecs_to_jiffies(5000);
60788- atomic_long_set(&psb->mcache_gen, 0);
60789+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
60790
60791 psb->trans_max_pages = 100;
60792
60793@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60794 INIT_LIST_HEAD(&psb->crypto_ready_list);
60795 INIT_LIST_HEAD(&psb->crypto_active_list);
60796
60797- atomic_set(&psb->trans_gen, 1);
60798+ atomic_set_unchecked(&psb->trans_gen, 1);
60799 atomic_long_set(&psb->total_inodes, 0);
60800
60801 mutex_init(&psb->state_lock);
60802diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
60803index e22665c..a2a9390 100644
60804--- a/drivers/staging/pohmelfs/mcache.c
60805+++ b/drivers/staging/pohmelfs/mcache.c
60806@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
60807 m->data = data;
60808 m->start = start;
60809 m->size = size;
60810- m->gen = atomic_long_inc_return(&psb->mcache_gen);
60811+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
60812
60813 mutex_lock(&psb->mcache_lock);
60814 err = pohmelfs_mcache_insert(psb, m);
60815diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
60816index 623a07d..4035c19 100644
60817--- a/drivers/staging/pohmelfs/netfs.h
60818+++ b/drivers/staging/pohmelfs/netfs.h
60819@@ -570,14 +570,14 @@ struct pohmelfs_config;
60820 struct pohmelfs_sb {
60821 struct rb_root mcache_root;
60822 struct mutex mcache_lock;
60823- atomic_long_t mcache_gen;
60824+ atomic_long_unchecked_t mcache_gen;
60825 unsigned long mcache_timeout;
60826
60827 unsigned int idx;
60828
60829 unsigned int trans_retries;
60830
60831- atomic_t trans_gen;
60832+ atomic_unchecked_t trans_gen;
60833
60834 unsigned int crypto_attached_size;
60835 unsigned int crypto_align_size;
60836diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
60837index 36a2535..0591bf4 100644
60838--- a/drivers/staging/pohmelfs/trans.c
60839+++ b/drivers/staging/pohmelfs/trans.c
60840@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
60841 int err;
60842 struct netfs_cmd *cmd = t->iovec.iov_base;
60843
60844- t->gen = atomic_inc_return(&psb->trans_gen);
60845+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
60846
60847 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
60848 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
60849diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
60850index f890a16..509ece8 100644
60851--- a/drivers/staging/sep/sep_driver.c
60852+++ b/drivers/staging/sep/sep_driver.c
60853@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
60854 static dev_t sep_devno;
60855
60856 /* the files operations structure of the driver */
60857-static struct file_operations sep_file_operations = {
60858+static const struct file_operations sep_file_operations = {
60859 .owner = THIS_MODULE,
60860 .ioctl = sep_ioctl,
60861 .poll = sep_poll,
60862diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
60863index 5e16bc3..7655b10 100644
60864--- a/drivers/staging/usbip/usbip_common.h
60865+++ b/drivers/staging/usbip/usbip_common.h
60866@@ -374,7 +374,7 @@ struct usbip_device {
60867 void (*shutdown)(struct usbip_device *);
60868 void (*reset)(struct usbip_device *);
60869 void (*unusable)(struct usbip_device *);
60870- } eh_ops;
60871+ } __no_const eh_ops;
60872 };
60873
60874
60875diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
60876index 57f7946..d9df23d 100644
60877--- a/drivers/staging/usbip/vhci.h
60878+++ b/drivers/staging/usbip/vhci.h
60879@@ -92,7 +92,7 @@ struct vhci_hcd {
60880 unsigned resuming:1;
60881 unsigned long re_timeout;
60882
60883- atomic_t seqnum;
60884+ atomic_unchecked_t seqnum;
60885
60886 /*
60887 * NOTE:
60888diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
60889index 20cd7db..c2693ff 100644
60890--- a/drivers/staging/usbip/vhci_hcd.c
60891+++ b/drivers/staging/usbip/vhci_hcd.c
60892@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
60893 return;
60894 }
60895
60896- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
60897+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60898 if (priv->seqnum == 0xffff)
60899 usbip_uinfo("seqnum max\n");
60900
60901@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
60902 return -ENOMEM;
60903 }
60904
60905- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
60906+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60907 if (unlink->seqnum == 0xffff)
60908 usbip_uinfo("seqnum max\n");
60909
60910@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
60911 vdev->rhport = rhport;
60912 }
60913
60914- atomic_set(&vhci->seqnum, 0);
60915+ atomic_set_unchecked(&vhci->seqnum, 0);
60916 spin_lock_init(&vhci->lock);
60917
60918
60919diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
60920index 7fd76fe..673695a 100644
60921--- a/drivers/staging/usbip/vhci_rx.c
60922+++ b/drivers/staging/usbip/vhci_rx.c
60923@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
60924 usbip_uerr("cannot find a urb of seqnum %u\n",
60925 pdu->base.seqnum);
60926 usbip_uinfo("max seqnum %d\n",
60927- atomic_read(&the_controller->seqnum));
60928+ atomic_read_unchecked(&the_controller->seqnum));
60929 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
60930 return;
60931 }
60932diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
60933index 7891288..8e31300 100644
60934--- a/drivers/staging/vme/devices/vme_user.c
60935+++ b/drivers/staging/vme/devices/vme_user.c
60936@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
60937 static int __init vme_user_probe(struct device *, int, int);
60938 static int __exit vme_user_remove(struct device *, int, int);
60939
60940-static struct file_operations vme_user_fops = {
60941+static const struct file_operations vme_user_fops = {
60942 .open = vme_user_open,
60943 .release = vme_user_release,
60944 .read = vme_user_read,
60945diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
60946index 58abf44..00c1fc8 100644
60947--- a/drivers/staging/vt6655/hostap.c
60948+++ b/drivers/staging/vt6655/hostap.c
60949@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60950 PSDevice apdev_priv;
60951 struct net_device *dev = pDevice->dev;
60952 int ret;
60953- const struct net_device_ops apdev_netdev_ops = {
60954+ net_device_ops_no_const apdev_netdev_ops = {
60955 .ndo_start_xmit = pDevice->tx_80211,
60956 };
60957
60958diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
60959index 0c8267a..db1f363 100644
60960--- a/drivers/staging/vt6656/hostap.c
60961+++ b/drivers/staging/vt6656/hostap.c
60962@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60963 PSDevice apdev_priv;
60964 struct net_device *dev = pDevice->dev;
60965 int ret;
60966- const struct net_device_ops apdev_netdev_ops = {
60967+ net_device_ops_no_const apdev_netdev_ops = {
60968 .ndo_start_xmit = pDevice->tx_80211,
60969 };
60970
60971diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
60972index 925678b..da7f5ed 100644
60973--- a/drivers/staging/wlan-ng/hfa384x_usb.c
60974+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
60975@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
60976
60977 struct usbctlx_completor {
60978 int (*complete) (struct usbctlx_completor *);
60979-};
60980+} __no_const;
60981 typedef struct usbctlx_completor usbctlx_completor_t;
60982
60983 static int
60984diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
60985index 40de151..924f268 100644
60986--- a/drivers/telephony/ixj.c
60987+++ b/drivers/telephony/ixj.c
60988@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
60989 bool mContinue;
60990 char *pIn, *pOut;
60991
60992+ pax_track_stack();
60993+
60994 if (!SCI_Prepare(j))
60995 return 0;
60996
60997diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
60998index e941367..b631f5a 100644
60999--- a/drivers/uio/uio.c
61000+++ b/drivers/uio/uio.c
61001@@ -23,6 +23,7 @@
61002 #include <linux/string.h>
61003 #include <linux/kobject.h>
61004 #include <linux/uio_driver.h>
61005+#include <asm/local.h>
61006
61007 #define UIO_MAX_DEVICES 255
61008
61009@@ -30,10 +31,10 @@ struct uio_device {
61010 struct module *owner;
61011 struct device *dev;
61012 int minor;
61013- atomic_t event;
61014+ atomic_unchecked_t event;
61015 struct fasync_struct *async_queue;
61016 wait_queue_head_t wait;
61017- int vma_count;
61018+ local_t vma_count;
61019 struct uio_info *info;
61020 struct kobject *map_dir;
61021 struct kobject *portio_dir;
61022@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
61023 return entry->show(mem, buf);
61024 }
61025
61026-static struct sysfs_ops map_sysfs_ops = {
61027+static const struct sysfs_ops map_sysfs_ops = {
61028 .show = map_type_show,
61029 };
61030
61031@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
61032 return entry->show(port, buf);
61033 }
61034
61035-static struct sysfs_ops portio_sysfs_ops = {
61036+static const struct sysfs_ops portio_sysfs_ops = {
61037 .show = portio_type_show,
61038 };
61039
61040@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
61041 struct uio_device *idev = dev_get_drvdata(dev);
61042 if (idev)
61043 return sprintf(buf, "%u\n",
61044- (unsigned int)atomic_read(&idev->event));
61045+ (unsigned int)atomic_read_unchecked(&idev->event));
61046 else
61047 return -ENODEV;
61048 }
61049@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
61050 {
61051 struct uio_device *idev = info->uio_dev;
61052
61053- atomic_inc(&idev->event);
61054+ atomic_inc_unchecked(&idev->event);
61055 wake_up_interruptible(&idev->wait);
61056 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
61057 }
61058@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
61059 }
61060
61061 listener->dev = idev;
61062- listener->event_count = atomic_read(&idev->event);
61063+ listener->event_count = atomic_read_unchecked(&idev->event);
61064 filep->private_data = listener;
61065
61066 if (idev->info->open) {
61067@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
61068 return -EIO;
61069
61070 poll_wait(filep, &idev->wait, wait);
61071- if (listener->event_count != atomic_read(&idev->event))
61072+ if (listener->event_count != atomic_read_unchecked(&idev->event))
61073 return POLLIN | POLLRDNORM;
61074 return 0;
61075 }
61076@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
61077 do {
61078 set_current_state(TASK_INTERRUPTIBLE);
61079
61080- event_count = atomic_read(&idev->event);
61081+ event_count = atomic_read_unchecked(&idev->event);
61082 if (event_count != listener->event_count) {
61083 if (copy_to_user(buf, &event_count, count))
61084 retval = -EFAULT;
61085@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
61086 static void uio_vma_open(struct vm_area_struct *vma)
61087 {
61088 struct uio_device *idev = vma->vm_private_data;
61089- idev->vma_count++;
61090+ local_inc(&idev->vma_count);
61091 }
61092
61093 static void uio_vma_close(struct vm_area_struct *vma)
61094 {
61095 struct uio_device *idev = vma->vm_private_data;
61096- idev->vma_count--;
61097+ local_dec(&idev->vma_count);
61098 }
61099
61100 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
61101@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
61102 idev->owner = owner;
61103 idev->info = info;
61104 init_waitqueue_head(&idev->wait);
61105- atomic_set(&idev->event, 0);
61106+ atomic_set_unchecked(&idev->event, 0);
61107
61108 ret = uio_get_minor(idev);
61109 if (ret)
61110diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
61111index fbea856..06efea6 100644
61112--- a/drivers/usb/atm/usbatm.c
61113+++ b/drivers/usb/atm/usbatm.c
61114@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61115 if (printk_ratelimit())
61116 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
61117 __func__, vpi, vci);
61118- atomic_inc(&vcc->stats->rx_err);
61119+ atomic_inc_unchecked(&vcc->stats->rx_err);
61120 return;
61121 }
61122
61123@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61124 if (length > ATM_MAX_AAL5_PDU) {
61125 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
61126 __func__, length, vcc);
61127- atomic_inc(&vcc->stats->rx_err);
61128+ atomic_inc_unchecked(&vcc->stats->rx_err);
61129 goto out;
61130 }
61131
61132@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61133 if (sarb->len < pdu_length) {
61134 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
61135 __func__, pdu_length, sarb->len, vcc);
61136- atomic_inc(&vcc->stats->rx_err);
61137+ atomic_inc_unchecked(&vcc->stats->rx_err);
61138 goto out;
61139 }
61140
61141 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
61142 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
61143 __func__, vcc);
61144- atomic_inc(&vcc->stats->rx_err);
61145+ atomic_inc_unchecked(&vcc->stats->rx_err);
61146 goto out;
61147 }
61148
61149@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61150 if (printk_ratelimit())
61151 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
61152 __func__, length);
61153- atomic_inc(&vcc->stats->rx_drop);
61154+ atomic_inc_unchecked(&vcc->stats->rx_drop);
61155 goto out;
61156 }
61157
61158@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61159
61160 vcc->push(vcc, skb);
61161
61162- atomic_inc(&vcc->stats->rx);
61163+ atomic_inc_unchecked(&vcc->stats->rx);
61164 out:
61165 skb_trim(sarb, 0);
61166 }
61167@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
61168 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
61169
61170 usbatm_pop(vcc, skb);
61171- atomic_inc(&vcc->stats->tx);
61172+ atomic_inc_unchecked(&vcc->stats->tx);
61173
61174 skb = skb_dequeue(&instance->sndqueue);
61175 }
61176@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
61177 if (!left--)
61178 return sprintf(page,
61179 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
61180- atomic_read(&atm_dev->stats.aal5.tx),
61181- atomic_read(&atm_dev->stats.aal5.tx_err),
61182- atomic_read(&atm_dev->stats.aal5.rx),
61183- atomic_read(&atm_dev->stats.aal5.rx_err),
61184- atomic_read(&atm_dev->stats.aal5.rx_drop));
61185+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
61186+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
61187+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
61188+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
61189+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
61190
61191 if (!left--) {
61192 if (instance->disconnected)
61193diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
61194index 24e6205..fe5a5d4 100644
61195--- a/drivers/usb/core/hcd.c
61196+++ b/drivers/usb/core/hcd.c
61197@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
61198
61199 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61200
61201-struct usb_mon_operations *mon_ops;
61202+const struct usb_mon_operations *mon_ops;
61203
61204 /*
61205 * The registration is unlocked.
61206@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
61207 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
61208 */
61209
61210-int usb_mon_register (struct usb_mon_operations *ops)
61211+int usb_mon_register (const struct usb_mon_operations *ops)
61212 {
61213
61214 if (mon_ops)
61215diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
61216index bcbe104..9cfd1c6 100644
61217--- a/drivers/usb/core/hcd.h
61218+++ b/drivers/usb/core/hcd.h
61219@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
61220 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61221
61222 struct usb_mon_operations {
61223- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
61224- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61225- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61226+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
61227+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61228+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61229 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
61230 };
61231
61232-extern struct usb_mon_operations *mon_ops;
61233+extern const struct usb_mon_operations *mon_ops;
61234
61235 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
61236 {
61237@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
61238 (*mon_ops->urb_complete)(bus, urb, status);
61239 }
61240
61241-int usb_mon_register(struct usb_mon_operations *ops);
61242+int usb_mon_register(const struct usb_mon_operations *ops);
61243 void usb_mon_deregister(void);
61244
61245 #else
61246diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
61247index 62ff5e7..530b74e 100644
61248--- a/drivers/usb/misc/appledisplay.c
61249+++ b/drivers/usb/misc/appledisplay.c
61250@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
61251 return pdata->msgdata[1];
61252 }
61253
61254-static struct backlight_ops appledisplay_bl_data = {
61255+static const struct backlight_ops appledisplay_bl_data = {
61256 .get_brightness = appledisplay_bl_get_brightness,
61257 .update_status = appledisplay_bl_update_status,
61258 };
61259diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
61260index e0c2db3..bd8cb66 100644
61261--- a/drivers/usb/mon/mon_main.c
61262+++ b/drivers/usb/mon/mon_main.c
61263@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
61264 /*
61265 * Ops
61266 */
61267-static struct usb_mon_operations mon_ops_0 = {
61268+static const struct usb_mon_operations mon_ops_0 = {
61269 .urb_submit = mon_submit,
61270 .urb_submit_error = mon_submit_error,
61271 .urb_complete = mon_complete,
61272diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
61273index d6bea3e..60b250e 100644
61274--- a/drivers/usb/wusbcore/wa-hc.h
61275+++ b/drivers/usb/wusbcore/wa-hc.h
61276@@ -192,7 +192,7 @@ struct wahc {
61277 struct list_head xfer_delayed_list;
61278 spinlock_t xfer_list_lock;
61279 struct work_struct xfer_work;
61280- atomic_t xfer_id_count;
61281+ atomic_unchecked_t xfer_id_count;
61282 };
61283
61284
61285@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
61286 INIT_LIST_HEAD(&wa->xfer_delayed_list);
61287 spin_lock_init(&wa->xfer_list_lock);
61288 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
61289- atomic_set(&wa->xfer_id_count, 1);
61290+ atomic_set_unchecked(&wa->xfer_id_count, 1);
61291 }
61292
61293 /**
61294diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
61295index 613a5fc..3174865 100644
61296--- a/drivers/usb/wusbcore/wa-xfer.c
61297+++ b/drivers/usb/wusbcore/wa-xfer.c
61298@@ -293,7 +293,7 @@ out:
61299 */
61300 static void wa_xfer_id_init(struct wa_xfer *xfer)
61301 {
61302- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
61303+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
61304 }
61305
61306 /*
61307diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
61308index aa42fce..f8a828c 100644
61309--- a/drivers/uwb/wlp/messages.c
61310+++ b/drivers/uwb/wlp/messages.c
61311@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
61312 size_t len = skb->len;
61313 size_t used;
61314 ssize_t result;
61315- struct wlp_nonce enonce, rnonce;
61316+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
61317 enum wlp_assc_error assc_err;
61318 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
61319 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
61320diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
61321index 0370399..6627c94 100644
61322--- a/drivers/uwb/wlp/sysfs.c
61323+++ b/drivers/uwb/wlp/sysfs.c
61324@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
61325 return ret;
61326 }
61327
61328-static
61329-struct sysfs_ops wss_sysfs_ops = {
61330+static const struct sysfs_ops wss_sysfs_ops = {
61331 .show = wlp_wss_attr_show,
61332 .store = wlp_wss_attr_store,
61333 };
61334diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
61335index 8c5e432..5ee90ea 100644
61336--- a/drivers/video/atmel_lcdfb.c
61337+++ b/drivers/video/atmel_lcdfb.c
61338@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
61339 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
61340 }
61341
61342-static struct backlight_ops atmel_lcdc_bl_ops = {
61343+static const struct backlight_ops atmel_lcdc_bl_ops = {
61344 .update_status = atmel_bl_update_status,
61345 .get_brightness = atmel_bl_get_brightness,
61346 };
61347diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
61348index e4e4d43..66bcbcc 100644
61349--- a/drivers/video/aty/aty128fb.c
61350+++ b/drivers/video/aty/aty128fb.c
61351@@ -149,7 +149,7 @@ enum {
61352 };
61353
61354 /* Must match above enum */
61355-static const char *r128_family[] __devinitdata = {
61356+static const char *r128_family[] __devinitconst = {
61357 "AGP",
61358 "PCI",
61359 "PRO AGP",
61360@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
61361 return bd->props.brightness;
61362 }
61363
61364-static struct backlight_ops aty128_bl_data = {
61365+static const struct backlight_ops aty128_bl_data = {
61366 .get_brightness = aty128_bl_get_brightness,
61367 .update_status = aty128_bl_update_status,
61368 };
61369diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
61370index 913b4a4..9295a38 100644
61371--- a/drivers/video/aty/atyfb_base.c
61372+++ b/drivers/video/aty/atyfb_base.c
61373@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
61374 return bd->props.brightness;
61375 }
61376
61377-static struct backlight_ops aty_bl_data = {
61378+static const struct backlight_ops aty_bl_data = {
61379 .get_brightness = aty_bl_get_brightness,
61380 .update_status = aty_bl_update_status,
61381 };
61382diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
61383index 1a056ad..221bd6a 100644
61384--- a/drivers/video/aty/radeon_backlight.c
61385+++ b/drivers/video/aty/radeon_backlight.c
61386@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
61387 return bd->props.brightness;
61388 }
61389
61390-static struct backlight_ops radeon_bl_data = {
61391+static const struct backlight_ops radeon_bl_data = {
61392 .get_brightness = radeon_bl_get_brightness,
61393 .update_status = radeon_bl_update_status,
61394 };
61395diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
61396index ad05da5..3cb2cb9 100644
61397--- a/drivers/video/backlight/adp5520_bl.c
61398+++ b/drivers/video/backlight/adp5520_bl.c
61399@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
61400 return error ? data->current_brightness : reg_val;
61401 }
61402
61403-static struct backlight_ops adp5520_bl_ops = {
61404+static const struct backlight_ops adp5520_bl_ops = {
61405 .update_status = adp5520_bl_update_status,
61406 .get_brightness = adp5520_bl_get_brightness,
61407 };
61408diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
61409index 2c3bdfc..d769b0b 100644
61410--- a/drivers/video/backlight/adx_bl.c
61411+++ b/drivers/video/backlight/adx_bl.c
61412@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
61413 return 1;
61414 }
61415
61416-static struct backlight_ops adx_backlight_ops = {
61417+static const struct backlight_ops adx_backlight_ops = {
61418 .options = 0,
61419 .update_status = adx_backlight_update_status,
61420 .get_brightness = adx_backlight_get_brightness,
61421diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
61422index 505c082..6b6b3cc 100644
61423--- a/drivers/video/backlight/atmel-pwm-bl.c
61424+++ b/drivers/video/backlight/atmel-pwm-bl.c
61425@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
61426 return pwm_channel_enable(&pwmbl->pwmc);
61427 }
61428
61429-static struct backlight_ops atmel_pwm_bl_ops = {
61430+static const struct backlight_ops atmel_pwm_bl_ops = {
61431 .get_brightness = atmel_pwm_bl_get_intensity,
61432 .update_status = atmel_pwm_bl_set_intensity,
61433 };
61434diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
61435index 5e20e6e..89025e6 100644
61436--- a/drivers/video/backlight/backlight.c
61437+++ b/drivers/video/backlight/backlight.c
61438@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
61439 * ERR_PTR() or a pointer to the newly allocated device.
61440 */
61441 struct backlight_device *backlight_device_register(const char *name,
61442- struct device *parent, void *devdata, struct backlight_ops *ops)
61443+ struct device *parent, void *devdata, const struct backlight_ops *ops)
61444 {
61445 struct backlight_device *new_bd;
61446 int rc;
61447diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
61448index 9677494..b4bcf80 100644
61449--- a/drivers/video/backlight/corgi_lcd.c
61450+++ b/drivers/video/backlight/corgi_lcd.c
61451@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
61452 }
61453 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
61454
61455-static struct backlight_ops corgi_bl_ops = {
61456+static const struct backlight_ops corgi_bl_ops = {
61457 .get_brightness = corgi_bl_get_intensity,
61458 .update_status = corgi_bl_update_status,
61459 };
61460diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
61461index b9fe62b..2914bf1 100644
61462--- a/drivers/video/backlight/cr_bllcd.c
61463+++ b/drivers/video/backlight/cr_bllcd.c
61464@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
61465 return intensity;
61466 }
61467
61468-static struct backlight_ops cr_backlight_ops = {
61469+static const struct backlight_ops cr_backlight_ops = {
61470 .get_brightness = cr_backlight_get_intensity,
61471 .update_status = cr_backlight_set_intensity,
61472 };
61473diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
61474index 701a108..feacfd5 100644
61475--- a/drivers/video/backlight/da903x_bl.c
61476+++ b/drivers/video/backlight/da903x_bl.c
61477@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
61478 return data->current_brightness;
61479 }
61480
61481-static struct backlight_ops da903x_backlight_ops = {
61482+static const struct backlight_ops da903x_backlight_ops = {
61483 .update_status = da903x_backlight_update_status,
61484 .get_brightness = da903x_backlight_get_brightness,
61485 };
61486diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
61487index 6d27f62..e6d348e 100644
61488--- a/drivers/video/backlight/generic_bl.c
61489+++ b/drivers/video/backlight/generic_bl.c
61490@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
61491 }
61492 EXPORT_SYMBOL(corgibl_limit_intensity);
61493
61494-static struct backlight_ops genericbl_ops = {
61495+static const struct backlight_ops genericbl_ops = {
61496 .options = BL_CORE_SUSPENDRESUME,
61497 .get_brightness = genericbl_get_intensity,
61498 .update_status = genericbl_send_intensity,
61499diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
61500index 7fb4eef..f7cc528 100644
61501--- a/drivers/video/backlight/hp680_bl.c
61502+++ b/drivers/video/backlight/hp680_bl.c
61503@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
61504 return current_intensity;
61505 }
61506
61507-static struct backlight_ops hp680bl_ops = {
61508+static const struct backlight_ops hp680bl_ops = {
61509 .get_brightness = hp680bl_get_intensity,
61510 .update_status = hp680bl_set_intensity,
61511 };
61512diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
61513index 7aed256..db9071f 100644
61514--- a/drivers/video/backlight/jornada720_bl.c
61515+++ b/drivers/video/backlight/jornada720_bl.c
61516@@ -93,7 +93,7 @@ out:
61517 return ret;
61518 }
61519
61520-static struct backlight_ops jornada_bl_ops = {
61521+static const struct backlight_ops jornada_bl_ops = {
61522 .get_brightness = jornada_bl_get_brightness,
61523 .update_status = jornada_bl_update_status,
61524 .options = BL_CORE_SUSPENDRESUME,
61525diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
61526index a38fda1..939e7b8 100644
61527--- a/drivers/video/backlight/kb3886_bl.c
61528+++ b/drivers/video/backlight/kb3886_bl.c
61529@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
61530 return kb3886bl_intensity;
61531 }
61532
61533-static struct backlight_ops kb3886bl_ops = {
61534+static const struct backlight_ops kb3886bl_ops = {
61535 .get_brightness = kb3886bl_get_intensity,
61536 .update_status = kb3886bl_send_intensity,
61537 };
61538diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
61539index 6b488b8..00a9591 100644
61540--- a/drivers/video/backlight/locomolcd.c
61541+++ b/drivers/video/backlight/locomolcd.c
61542@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
61543 return current_intensity;
61544 }
61545
61546-static struct backlight_ops locomobl_data = {
61547+static const struct backlight_ops locomobl_data = {
61548 .get_brightness = locomolcd_get_intensity,
61549 .update_status = locomolcd_set_intensity,
61550 };
61551diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
61552index 99bdfa8..3dac448 100644
61553--- a/drivers/video/backlight/mbp_nvidia_bl.c
61554+++ b/drivers/video/backlight/mbp_nvidia_bl.c
61555@@ -33,7 +33,7 @@ struct dmi_match_data {
61556 unsigned long iostart;
61557 unsigned long iolen;
61558 /* Backlight operations structure. */
61559- struct backlight_ops backlight_ops;
61560+ const struct backlight_ops backlight_ops;
61561 };
61562
61563 /* Module parameters. */
61564diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
61565index cbad67e..3cf900e 100644
61566--- a/drivers/video/backlight/omap1_bl.c
61567+++ b/drivers/video/backlight/omap1_bl.c
61568@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
61569 return bl->current_intensity;
61570 }
61571
61572-static struct backlight_ops omapbl_ops = {
61573+static const struct backlight_ops omapbl_ops = {
61574 .get_brightness = omapbl_get_intensity,
61575 .update_status = omapbl_update_status,
61576 };
61577diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
61578index 9edaf24..075786e 100644
61579--- a/drivers/video/backlight/progear_bl.c
61580+++ b/drivers/video/backlight/progear_bl.c
61581@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
61582 return intensity - HW_LEVEL_MIN;
61583 }
61584
61585-static struct backlight_ops progearbl_ops = {
61586+static const struct backlight_ops progearbl_ops = {
61587 .get_brightness = progearbl_get_intensity,
61588 .update_status = progearbl_set_intensity,
61589 };
61590diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
61591index 8871662..df9e0b3 100644
61592--- a/drivers/video/backlight/pwm_bl.c
61593+++ b/drivers/video/backlight/pwm_bl.c
61594@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
61595 return bl->props.brightness;
61596 }
61597
61598-static struct backlight_ops pwm_backlight_ops = {
61599+static const struct backlight_ops pwm_backlight_ops = {
61600 .update_status = pwm_backlight_update_status,
61601 .get_brightness = pwm_backlight_get_brightness,
61602 };
61603diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
61604index 43edbad..e14ce4d 100644
61605--- a/drivers/video/backlight/tosa_bl.c
61606+++ b/drivers/video/backlight/tosa_bl.c
61607@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
61608 return props->brightness;
61609 }
61610
61611-static struct backlight_ops bl_ops = {
61612+static const struct backlight_ops bl_ops = {
61613 .get_brightness = tosa_bl_get_brightness,
61614 .update_status = tosa_bl_update_status,
61615 };
61616diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
61617index 467bdb7..e32add3 100644
61618--- a/drivers/video/backlight/wm831x_bl.c
61619+++ b/drivers/video/backlight/wm831x_bl.c
61620@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
61621 return data->current_brightness;
61622 }
61623
61624-static struct backlight_ops wm831x_backlight_ops = {
61625+static const struct backlight_ops wm831x_backlight_ops = {
61626 .options = BL_CORE_SUSPENDRESUME,
61627 .update_status = wm831x_backlight_update_status,
61628 .get_brightness = wm831x_backlight_get_brightness,
61629diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
61630index e49ae5e..db4e6f7 100644
61631--- a/drivers/video/bf54x-lq043fb.c
61632+++ b/drivers/video/bf54x-lq043fb.c
61633@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61634 return 0;
61635 }
61636
61637-static struct backlight_ops bfin_lq043fb_bl_ops = {
61638+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61639 .get_brightness = bl_get_brightness,
61640 };
61641
61642diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
61643index 2c72a7c..d523e52 100644
61644--- a/drivers/video/bfin-t350mcqb-fb.c
61645+++ b/drivers/video/bfin-t350mcqb-fb.c
61646@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61647 return 0;
61648 }
61649
61650-static struct backlight_ops bfin_lq043fb_bl_ops = {
61651+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61652 .get_brightness = bl_get_brightness,
61653 };
61654
61655diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
61656index f53b9f1..958bf4e 100644
61657--- a/drivers/video/fbcmap.c
61658+++ b/drivers/video/fbcmap.c
61659@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
61660 rc = -ENODEV;
61661 goto out;
61662 }
61663- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
61664- !info->fbops->fb_setcmap)) {
61665+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
61666 rc = -EINVAL;
61667 goto out1;
61668 }
61669diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
61670index 99bbd28..ad3829e 100644
61671--- a/drivers/video/fbmem.c
61672+++ b/drivers/video/fbmem.c
61673@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61674 image->dx += image->width + 8;
61675 }
61676 } else if (rotate == FB_ROTATE_UD) {
61677- for (x = 0; x < num && image->dx >= 0; x++) {
61678+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
61679 info->fbops->fb_imageblit(info, image);
61680 image->dx -= image->width + 8;
61681 }
61682@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61683 image->dy += image->height + 8;
61684 }
61685 } else if (rotate == FB_ROTATE_CCW) {
61686- for (x = 0; x < num && image->dy >= 0; x++) {
61687+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
61688 info->fbops->fb_imageblit(info, image);
61689 image->dy -= image->height + 8;
61690 }
61691@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
61692 int flags = info->flags;
61693 int ret = 0;
61694
61695+ pax_track_stack();
61696+
61697 if (var->activate & FB_ACTIVATE_INV_MODE) {
61698 struct fb_videomode mode1, mode2;
61699
61700@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61701 void __user *argp = (void __user *)arg;
61702 long ret = 0;
61703
61704+ pax_track_stack();
61705+
61706 switch (cmd) {
61707 case FBIOGET_VSCREENINFO:
61708 if (!lock_fb_info(info))
61709@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61710 return -EFAULT;
61711 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
61712 return -EINVAL;
61713- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
61714+ if (con2fb.framebuffer >= FB_MAX)
61715 return -EINVAL;
61716 if (!registered_fb[con2fb.framebuffer])
61717 request_module("fb%d", con2fb.framebuffer);
61718diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
61719index f20eff8..3e4f622 100644
61720--- a/drivers/video/geode/gx1fb_core.c
61721+++ b/drivers/video/geode/gx1fb_core.c
61722@@ -30,7 +30,7 @@ static int crt_option = 1;
61723 static char panel_option[32] = "";
61724
61725 /* Modes relevant to the GX1 (taken from modedb.c) */
61726-static const struct fb_videomode __initdata gx1_modedb[] = {
61727+static const struct fb_videomode __initconst gx1_modedb[] = {
61728 /* 640x480-60 VESA */
61729 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
61730 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
61731diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
61732index 896e53d..4d87d0b 100644
61733--- a/drivers/video/gxt4500.c
61734+++ b/drivers/video/gxt4500.c
61735@@ -156,7 +156,7 @@ struct gxt4500_par {
61736 static char *mode_option;
61737
61738 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
61739-static const struct fb_videomode defaultmode __devinitdata = {
61740+static const struct fb_videomode defaultmode __devinitconst = {
61741 .refresh = 60,
61742 .xres = 1280,
61743 .yres = 1024,
61744@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
61745 return 0;
61746 }
61747
61748-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
61749+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
61750 .id = "IBM GXT4500P",
61751 .type = FB_TYPE_PACKED_PIXELS,
61752 .visual = FB_VISUAL_PSEUDOCOLOR,
61753diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
61754index f5bedee..28c6028 100644
61755--- a/drivers/video/i810/i810_accel.c
61756+++ b/drivers/video/i810/i810_accel.c
61757@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
61758 }
61759 }
61760 printk("ringbuffer lockup!!!\n");
61761+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
61762 i810_report_error(mmio);
61763 par->dev_flags |= LOCKUP;
61764 info->pixmap.scan_align = 1;
61765diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
61766index 5743ea2..457f82c 100644
61767--- a/drivers/video/i810/i810_main.c
61768+++ b/drivers/video/i810/i810_main.c
61769@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
61770 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
61771
61772 /* PCI */
61773-static const char *i810_pci_list[] __devinitdata = {
61774+static const char *i810_pci_list[] __devinitconst = {
61775 "Intel(R) 810 Framebuffer Device" ,
61776 "Intel(R) 810-DC100 Framebuffer Device" ,
61777 "Intel(R) 810E Framebuffer Device" ,
61778diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
61779index 3c14e43..eafa544 100644
61780--- a/drivers/video/logo/logo_linux_clut224.ppm
61781+++ b/drivers/video/logo/logo_linux_clut224.ppm
61782@@ -1,1604 +1,1123 @@
61783 P3
61784-# Standard 224-color Linux logo
61785 80 80
61786 255
61787- 0 0 0 0 0 0 0 0 0 0 0 0
61788- 0 0 0 0 0 0 0 0 0 0 0 0
61789- 0 0 0 0 0 0 0 0 0 0 0 0
61790- 0 0 0 0 0 0 0 0 0 0 0 0
61791- 0 0 0 0 0 0 0 0 0 0 0 0
61792- 0 0 0 0 0 0 0 0 0 0 0 0
61793- 0 0 0 0 0 0 0 0 0 0 0 0
61794- 0 0 0 0 0 0 0 0 0 0 0 0
61795- 0 0 0 0 0 0 0 0 0 0 0 0
61796- 6 6 6 6 6 6 10 10 10 10 10 10
61797- 10 10 10 6 6 6 6 6 6 6 6 6
61798- 0 0 0 0 0 0 0 0 0 0 0 0
61799- 0 0 0 0 0 0 0 0 0 0 0 0
61800- 0 0 0 0 0 0 0 0 0 0 0 0
61801- 0 0 0 0 0 0 0 0 0 0 0 0
61802- 0 0 0 0 0 0 0 0 0 0 0 0
61803- 0 0 0 0 0 0 0 0 0 0 0 0
61804- 0 0 0 0 0 0 0 0 0 0 0 0
61805- 0 0 0 0 0 0 0 0 0 0 0 0
61806- 0 0 0 0 0 0 0 0 0 0 0 0
61807- 0 0 0 0 0 0 0 0 0 0 0 0
61808- 0 0 0 0 0 0 0 0 0 0 0 0
61809- 0 0 0 0 0 0 0 0 0 0 0 0
61810- 0 0 0 0 0 0 0 0 0 0 0 0
61811- 0 0 0 0 0 0 0 0 0 0 0 0
61812- 0 0 0 0 0 0 0 0 0 0 0 0
61813- 0 0 0 0 0 0 0 0 0 0 0 0
61814- 0 0 0 0 0 0 0 0 0 0 0 0
61815- 0 0 0 6 6 6 10 10 10 14 14 14
61816- 22 22 22 26 26 26 30 30 30 34 34 34
61817- 30 30 30 30 30 30 26 26 26 18 18 18
61818- 14 14 14 10 10 10 6 6 6 0 0 0
61819- 0 0 0 0 0 0 0 0 0 0 0 0
61820- 0 0 0 0 0 0 0 0 0 0 0 0
61821- 0 0 0 0 0 0 0 0 0 0 0 0
61822- 0 0 0 0 0 0 0 0 0 0 0 0
61823- 0 0 0 0 0 0 0 0 0 0 0 0
61824- 0 0 0 0 0 0 0 0 0 0 0 0
61825- 0 0 0 0 0 0 0 0 0 0 0 0
61826- 0 0 0 0 0 0 0 0 0 0 0 0
61827- 0 0 0 0 0 0 0 0 0 0 0 0
61828- 0 0 0 0 0 1 0 0 1 0 0 0
61829- 0 0 0 0 0 0 0 0 0 0 0 0
61830- 0 0 0 0 0 0 0 0 0 0 0 0
61831- 0 0 0 0 0 0 0 0 0 0 0 0
61832- 0 0 0 0 0 0 0 0 0 0 0 0
61833- 0 0 0 0 0 0 0 0 0 0 0 0
61834- 0 0 0 0 0 0 0 0 0 0 0 0
61835- 6 6 6 14 14 14 26 26 26 42 42 42
61836- 54 54 54 66 66 66 78 78 78 78 78 78
61837- 78 78 78 74 74 74 66 66 66 54 54 54
61838- 42 42 42 26 26 26 18 18 18 10 10 10
61839- 6 6 6 0 0 0 0 0 0 0 0 0
61840- 0 0 0 0 0 0 0 0 0 0 0 0
61841- 0 0 0 0 0 0 0 0 0 0 0 0
61842- 0 0 0 0 0 0 0 0 0 0 0 0
61843- 0 0 0 0 0 0 0 0 0 0 0 0
61844- 0 0 0 0 0 0 0 0 0 0 0 0
61845- 0 0 0 0 0 0 0 0 0 0 0 0
61846- 0 0 0 0 0 0 0 0 0 0 0 0
61847- 0 0 0 0 0 0 0 0 0 0 0 0
61848- 0 0 1 0 0 0 0 0 0 0 0 0
61849- 0 0 0 0 0 0 0 0 0 0 0 0
61850- 0 0 0 0 0 0 0 0 0 0 0 0
61851- 0 0 0 0 0 0 0 0 0 0 0 0
61852- 0 0 0 0 0 0 0 0 0 0 0 0
61853- 0 0 0 0 0 0 0 0 0 0 0 0
61854- 0 0 0 0 0 0 0 0 0 10 10 10
61855- 22 22 22 42 42 42 66 66 66 86 86 86
61856- 66 66 66 38 38 38 38 38 38 22 22 22
61857- 26 26 26 34 34 34 54 54 54 66 66 66
61858- 86 86 86 70 70 70 46 46 46 26 26 26
61859- 14 14 14 6 6 6 0 0 0 0 0 0
61860- 0 0 0 0 0 0 0 0 0 0 0 0
61861- 0 0 0 0 0 0 0 0 0 0 0 0
61862- 0 0 0 0 0 0 0 0 0 0 0 0
61863- 0 0 0 0 0 0 0 0 0 0 0 0
61864- 0 0 0 0 0 0 0 0 0 0 0 0
61865- 0 0 0 0 0 0 0 0 0 0 0 0
61866- 0 0 0 0 0 0 0 0 0 0 0 0
61867- 0 0 0 0 0 0 0 0 0 0 0 0
61868- 0 0 1 0 0 1 0 0 1 0 0 0
61869- 0 0 0 0 0 0 0 0 0 0 0 0
61870- 0 0 0 0 0 0 0 0 0 0 0 0
61871- 0 0 0 0 0 0 0 0 0 0 0 0
61872- 0 0 0 0 0 0 0 0 0 0 0 0
61873- 0 0 0 0 0 0 0 0 0 0 0 0
61874- 0 0 0 0 0 0 10 10 10 26 26 26
61875- 50 50 50 82 82 82 58 58 58 6 6 6
61876- 2 2 6 2 2 6 2 2 6 2 2 6
61877- 2 2 6 2 2 6 2 2 6 2 2 6
61878- 6 6 6 54 54 54 86 86 86 66 66 66
61879- 38 38 38 18 18 18 6 6 6 0 0 0
61880- 0 0 0 0 0 0 0 0 0 0 0 0
61881- 0 0 0 0 0 0 0 0 0 0 0 0
61882- 0 0 0 0 0 0 0 0 0 0 0 0
61883- 0 0 0 0 0 0 0 0 0 0 0 0
61884- 0 0 0 0 0 0 0 0 0 0 0 0
61885- 0 0 0 0 0 0 0 0 0 0 0 0
61886- 0 0 0 0 0 0 0 0 0 0 0 0
61887- 0 0 0 0 0 0 0 0 0 0 0 0
61888- 0 0 0 0 0 0 0 0 0 0 0 0
61889- 0 0 0 0 0 0 0 0 0 0 0 0
61890- 0 0 0 0 0 0 0 0 0 0 0 0
61891- 0 0 0 0 0 0 0 0 0 0 0 0
61892- 0 0 0 0 0 0 0 0 0 0 0 0
61893- 0 0 0 0 0 0 0 0 0 0 0 0
61894- 0 0 0 6 6 6 22 22 22 50 50 50
61895- 78 78 78 34 34 34 2 2 6 2 2 6
61896- 2 2 6 2 2 6 2 2 6 2 2 6
61897- 2 2 6 2 2 6 2 2 6 2 2 6
61898- 2 2 6 2 2 6 6 6 6 70 70 70
61899- 78 78 78 46 46 46 22 22 22 6 6 6
61900- 0 0 0 0 0 0 0 0 0 0 0 0
61901- 0 0 0 0 0 0 0 0 0 0 0 0
61902- 0 0 0 0 0 0 0 0 0 0 0 0
61903- 0 0 0 0 0 0 0 0 0 0 0 0
61904- 0 0 0 0 0 0 0 0 0 0 0 0
61905- 0 0 0 0 0 0 0 0 0 0 0 0
61906- 0 0 0 0 0 0 0 0 0 0 0 0
61907- 0 0 0 0 0 0 0 0 0 0 0 0
61908- 0 0 1 0 0 1 0 0 1 0 0 0
61909- 0 0 0 0 0 0 0 0 0 0 0 0
61910- 0 0 0 0 0 0 0 0 0 0 0 0
61911- 0 0 0 0 0 0 0 0 0 0 0 0
61912- 0 0 0 0 0 0 0 0 0 0 0 0
61913- 0 0 0 0 0 0 0 0 0 0 0 0
61914- 6 6 6 18 18 18 42 42 42 82 82 82
61915- 26 26 26 2 2 6 2 2 6 2 2 6
61916- 2 2 6 2 2 6 2 2 6 2 2 6
61917- 2 2 6 2 2 6 2 2 6 14 14 14
61918- 46 46 46 34 34 34 6 6 6 2 2 6
61919- 42 42 42 78 78 78 42 42 42 18 18 18
61920- 6 6 6 0 0 0 0 0 0 0 0 0
61921- 0 0 0 0 0 0 0 0 0 0 0 0
61922- 0 0 0 0 0 0 0 0 0 0 0 0
61923- 0 0 0 0 0 0 0 0 0 0 0 0
61924- 0 0 0 0 0 0 0 0 0 0 0 0
61925- 0 0 0 0 0 0 0 0 0 0 0 0
61926- 0 0 0 0 0 0 0 0 0 0 0 0
61927- 0 0 0 0 0 0 0 0 0 0 0 0
61928- 0 0 1 0 0 0 0 0 1 0 0 0
61929- 0 0 0 0 0 0 0 0 0 0 0 0
61930- 0 0 0 0 0 0 0 0 0 0 0 0
61931- 0 0 0 0 0 0 0 0 0 0 0 0
61932- 0 0 0 0 0 0 0 0 0 0 0 0
61933- 0 0 0 0 0 0 0 0 0 0 0 0
61934- 10 10 10 30 30 30 66 66 66 58 58 58
61935- 2 2 6 2 2 6 2 2 6 2 2 6
61936- 2 2 6 2 2 6 2 2 6 2 2 6
61937- 2 2 6 2 2 6 2 2 6 26 26 26
61938- 86 86 86 101 101 101 46 46 46 10 10 10
61939- 2 2 6 58 58 58 70 70 70 34 34 34
61940- 10 10 10 0 0 0 0 0 0 0 0 0
61941- 0 0 0 0 0 0 0 0 0 0 0 0
61942- 0 0 0 0 0 0 0 0 0 0 0 0
61943- 0 0 0 0 0 0 0 0 0 0 0 0
61944- 0 0 0 0 0 0 0 0 0 0 0 0
61945- 0 0 0 0 0 0 0 0 0 0 0 0
61946- 0 0 0 0 0 0 0 0 0 0 0 0
61947- 0 0 0 0 0 0 0 0 0 0 0 0
61948- 0 0 1 0 0 1 0 0 1 0 0 0
61949- 0 0 0 0 0 0 0 0 0 0 0 0
61950- 0 0 0 0 0 0 0 0 0 0 0 0
61951- 0 0 0 0 0 0 0 0 0 0 0 0
61952- 0 0 0 0 0 0 0 0 0 0 0 0
61953- 0 0 0 0 0 0 0 0 0 0 0 0
61954- 14 14 14 42 42 42 86 86 86 10 10 10
61955- 2 2 6 2 2 6 2 2 6 2 2 6
61956- 2 2 6 2 2 6 2 2 6 2 2 6
61957- 2 2 6 2 2 6 2 2 6 30 30 30
61958- 94 94 94 94 94 94 58 58 58 26 26 26
61959- 2 2 6 6 6 6 78 78 78 54 54 54
61960- 22 22 22 6 6 6 0 0 0 0 0 0
61961- 0 0 0 0 0 0 0 0 0 0 0 0
61962- 0 0 0 0 0 0 0 0 0 0 0 0
61963- 0 0 0 0 0 0 0 0 0 0 0 0
61964- 0 0 0 0 0 0 0 0 0 0 0 0
61965- 0 0 0 0 0 0 0 0 0 0 0 0
61966- 0 0 0 0 0 0 0 0 0 0 0 0
61967- 0 0 0 0 0 0 0 0 0 0 0 0
61968- 0 0 0 0 0 0 0 0 0 0 0 0
61969- 0 0 0 0 0 0 0 0 0 0 0 0
61970- 0 0 0 0 0 0 0 0 0 0 0 0
61971- 0 0 0 0 0 0 0 0 0 0 0 0
61972- 0 0 0 0 0 0 0 0 0 0 0 0
61973- 0 0 0 0 0 0 0 0 0 6 6 6
61974- 22 22 22 62 62 62 62 62 62 2 2 6
61975- 2 2 6 2 2 6 2 2 6 2 2 6
61976- 2 2 6 2 2 6 2 2 6 2 2 6
61977- 2 2 6 2 2 6 2 2 6 26 26 26
61978- 54 54 54 38 38 38 18 18 18 10 10 10
61979- 2 2 6 2 2 6 34 34 34 82 82 82
61980- 38 38 38 14 14 14 0 0 0 0 0 0
61981- 0 0 0 0 0 0 0 0 0 0 0 0
61982- 0 0 0 0 0 0 0 0 0 0 0 0
61983- 0 0 0 0 0 0 0 0 0 0 0 0
61984- 0 0 0 0 0 0 0 0 0 0 0 0
61985- 0 0 0 0 0 0 0 0 0 0 0 0
61986- 0 0 0 0 0 0 0 0 0 0 0 0
61987- 0 0 0 0 0 0 0 0 0 0 0 0
61988- 0 0 0 0 0 1 0 0 1 0 0 0
61989- 0 0 0 0 0 0 0 0 0 0 0 0
61990- 0 0 0 0 0 0 0 0 0 0 0 0
61991- 0 0 0 0 0 0 0 0 0 0 0 0
61992- 0 0 0 0 0 0 0 0 0 0 0 0
61993- 0 0 0 0 0 0 0 0 0 6 6 6
61994- 30 30 30 78 78 78 30 30 30 2 2 6
61995- 2 2 6 2 2 6 2 2 6 2 2 6
61996- 2 2 6 2 2 6 2 2 6 2 2 6
61997- 2 2 6 2 2 6 2 2 6 10 10 10
61998- 10 10 10 2 2 6 2 2 6 2 2 6
61999- 2 2 6 2 2 6 2 2 6 78 78 78
62000- 50 50 50 18 18 18 6 6 6 0 0 0
62001- 0 0 0 0 0 0 0 0 0 0 0 0
62002- 0 0 0 0 0 0 0 0 0 0 0 0
62003- 0 0 0 0 0 0 0 0 0 0 0 0
62004- 0 0 0 0 0 0 0 0 0 0 0 0
62005- 0 0 0 0 0 0 0 0 0 0 0 0
62006- 0 0 0 0 0 0 0 0 0 0 0 0
62007- 0 0 0 0 0 0 0 0 0 0 0 0
62008- 0 0 1 0 0 0 0 0 0 0 0 0
62009- 0 0 0 0 0 0 0 0 0 0 0 0
62010- 0 0 0 0 0 0 0 0 0 0 0 0
62011- 0 0 0 0 0 0 0 0 0 0 0 0
62012- 0 0 0 0 0 0 0 0 0 0 0 0
62013- 0 0 0 0 0 0 0 0 0 10 10 10
62014- 38 38 38 86 86 86 14 14 14 2 2 6
62015- 2 2 6 2 2 6 2 2 6 2 2 6
62016- 2 2 6 2 2 6 2 2 6 2 2 6
62017- 2 2 6 2 2 6 2 2 6 2 2 6
62018- 2 2 6 2 2 6 2 2 6 2 2 6
62019- 2 2 6 2 2 6 2 2 6 54 54 54
62020- 66 66 66 26 26 26 6 6 6 0 0 0
62021- 0 0 0 0 0 0 0 0 0 0 0 0
62022- 0 0 0 0 0 0 0 0 0 0 0 0
62023- 0 0 0 0 0 0 0 0 0 0 0 0
62024- 0 0 0 0 0 0 0 0 0 0 0 0
62025- 0 0 0 0 0 0 0 0 0 0 0 0
62026- 0 0 0 0 0 0 0 0 0 0 0 0
62027- 0 0 0 0 0 0 0 0 0 0 0 0
62028- 0 0 0 0 0 1 0 0 1 0 0 0
62029- 0 0 0 0 0 0 0 0 0 0 0 0
62030- 0 0 0 0 0 0 0 0 0 0 0 0
62031- 0 0 0 0 0 0 0 0 0 0 0 0
62032- 0 0 0 0 0 0 0 0 0 0 0 0
62033- 0 0 0 0 0 0 0 0 0 14 14 14
62034- 42 42 42 82 82 82 2 2 6 2 2 6
62035- 2 2 6 6 6 6 10 10 10 2 2 6
62036- 2 2 6 2 2 6 2 2 6 2 2 6
62037- 2 2 6 2 2 6 2 2 6 6 6 6
62038- 14 14 14 10 10 10 2 2 6 2 2 6
62039- 2 2 6 2 2 6 2 2 6 18 18 18
62040- 82 82 82 34 34 34 10 10 10 0 0 0
62041- 0 0 0 0 0 0 0 0 0 0 0 0
62042- 0 0 0 0 0 0 0 0 0 0 0 0
62043- 0 0 0 0 0 0 0 0 0 0 0 0
62044- 0 0 0 0 0 0 0 0 0 0 0 0
62045- 0 0 0 0 0 0 0 0 0 0 0 0
62046- 0 0 0 0 0 0 0 0 0 0 0 0
62047- 0 0 0 0 0 0 0 0 0 0 0 0
62048- 0 0 1 0 0 0 0 0 0 0 0 0
62049- 0 0 0 0 0 0 0 0 0 0 0 0
62050- 0 0 0 0 0 0 0 0 0 0 0 0
62051- 0 0 0 0 0 0 0 0 0 0 0 0
62052- 0 0 0 0 0 0 0 0 0 0 0 0
62053- 0 0 0 0 0 0 0 0 0 14 14 14
62054- 46 46 46 86 86 86 2 2 6 2 2 6
62055- 6 6 6 6 6 6 22 22 22 34 34 34
62056- 6 6 6 2 2 6 2 2 6 2 2 6
62057- 2 2 6 2 2 6 18 18 18 34 34 34
62058- 10 10 10 50 50 50 22 22 22 2 2 6
62059- 2 2 6 2 2 6 2 2 6 10 10 10
62060- 86 86 86 42 42 42 14 14 14 0 0 0
62061- 0 0 0 0 0 0 0 0 0 0 0 0
62062- 0 0 0 0 0 0 0 0 0 0 0 0
62063- 0 0 0 0 0 0 0 0 0 0 0 0
62064- 0 0 0 0 0 0 0 0 0 0 0 0
62065- 0 0 0 0 0 0 0 0 0 0 0 0
62066- 0 0 0 0 0 0 0 0 0 0 0 0
62067- 0 0 0 0 0 0 0 0 0 0 0 0
62068- 0 0 1 0 0 1 0 0 1 0 0 0
62069- 0 0 0 0 0 0 0 0 0 0 0 0
62070- 0 0 0 0 0 0 0 0 0 0 0 0
62071- 0 0 0 0 0 0 0 0 0 0 0 0
62072- 0 0 0 0 0 0 0 0 0 0 0 0
62073- 0 0 0 0 0 0 0 0 0 14 14 14
62074- 46 46 46 86 86 86 2 2 6 2 2 6
62075- 38 38 38 116 116 116 94 94 94 22 22 22
62076- 22 22 22 2 2 6 2 2 6 2 2 6
62077- 14 14 14 86 86 86 138 138 138 162 162 162
62078-154 154 154 38 38 38 26 26 26 6 6 6
62079- 2 2 6 2 2 6 2 2 6 2 2 6
62080- 86 86 86 46 46 46 14 14 14 0 0 0
62081- 0 0 0 0 0 0 0 0 0 0 0 0
62082- 0 0 0 0 0 0 0 0 0 0 0 0
62083- 0 0 0 0 0 0 0 0 0 0 0 0
62084- 0 0 0 0 0 0 0 0 0 0 0 0
62085- 0 0 0 0 0 0 0 0 0 0 0 0
62086- 0 0 0 0 0 0 0 0 0 0 0 0
62087- 0 0 0 0 0 0 0 0 0 0 0 0
62088- 0 0 0 0 0 0 0 0 0 0 0 0
62089- 0 0 0 0 0 0 0 0 0 0 0 0
62090- 0 0 0 0 0 0 0 0 0 0 0 0
62091- 0 0 0 0 0 0 0 0 0 0 0 0
62092- 0 0 0 0 0 0 0 0 0 0 0 0
62093- 0 0 0 0 0 0 0 0 0 14 14 14
62094- 46 46 46 86 86 86 2 2 6 14 14 14
62095-134 134 134 198 198 198 195 195 195 116 116 116
62096- 10 10 10 2 2 6 2 2 6 6 6 6
62097-101 98 89 187 187 187 210 210 210 218 218 218
62098-214 214 214 134 134 134 14 14 14 6 6 6
62099- 2 2 6 2 2 6 2 2 6 2 2 6
62100- 86 86 86 50 50 50 18 18 18 6 6 6
62101- 0 0 0 0 0 0 0 0 0 0 0 0
62102- 0 0 0 0 0 0 0 0 0 0 0 0
62103- 0 0 0 0 0 0 0 0 0 0 0 0
62104- 0 0 0 0 0 0 0 0 0 0 0 0
62105- 0 0 0 0 0 0 0 0 0 0 0 0
62106- 0 0 0 0 0 0 0 0 0 0 0 0
62107- 0 0 0 0 0 0 0 0 1 0 0 0
62108- 0 0 1 0 0 1 0 0 1 0 0 0
62109- 0 0 0 0 0 0 0 0 0 0 0 0
62110- 0 0 0 0 0 0 0 0 0 0 0 0
62111- 0 0 0 0 0 0 0 0 0 0 0 0
62112- 0 0 0 0 0 0 0 0 0 0 0 0
62113- 0 0 0 0 0 0 0 0 0 14 14 14
62114- 46 46 46 86 86 86 2 2 6 54 54 54
62115-218 218 218 195 195 195 226 226 226 246 246 246
62116- 58 58 58 2 2 6 2 2 6 30 30 30
62117-210 210 210 253 253 253 174 174 174 123 123 123
62118-221 221 221 234 234 234 74 74 74 2 2 6
62119- 2 2 6 2 2 6 2 2 6 2 2 6
62120- 70 70 70 58 58 58 22 22 22 6 6 6
62121- 0 0 0 0 0 0 0 0 0 0 0 0
62122- 0 0 0 0 0 0 0 0 0 0 0 0
62123- 0 0 0 0 0 0 0 0 0 0 0 0
62124- 0 0 0 0 0 0 0 0 0 0 0 0
62125- 0 0 0 0 0 0 0 0 0 0 0 0
62126- 0 0 0 0 0 0 0 0 0 0 0 0
62127- 0 0 0 0 0 0 0 0 0 0 0 0
62128- 0 0 0 0 0 0 0 0 0 0 0 0
62129- 0 0 0 0 0 0 0 0 0 0 0 0
62130- 0 0 0 0 0 0 0 0 0 0 0 0
62131- 0 0 0 0 0 0 0 0 0 0 0 0
62132- 0 0 0 0 0 0 0 0 0 0 0 0
62133- 0 0 0 0 0 0 0 0 0 14 14 14
62134- 46 46 46 82 82 82 2 2 6 106 106 106
62135-170 170 170 26 26 26 86 86 86 226 226 226
62136-123 123 123 10 10 10 14 14 14 46 46 46
62137-231 231 231 190 190 190 6 6 6 70 70 70
62138- 90 90 90 238 238 238 158 158 158 2 2 6
62139- 2 2 6 2 2 6 2 2 6 2 2 6
62140- 70 70 70 58 58 58 22 22 22 6 6 6
62141- 0 0 0 0 0 0 0 0 0 0 0 0
62142- 0 0 0 0 0 0 0 0 0 0 0 0
62143- 0 0 0 0 0 0 0 0 0 0 0 0
62144- 0 0 0 0 0 0 0 0 0 0 0 0
62145- 0 0 0 0 0 0 0 0 0 0 0 0
62146- 0 0 0 0 0 0 0 0 0 0 0 0
62147- 0 0 0 0 0 0 0 0 1 0 0 0
62148- 0 0 1 0 0 1 0 0 1 0 0 0
62149- 0 0 0 0 0 0 0 0 0 0 0 0
62150- 0 0 0 0 0 0 0 0 0 0 0 0
62151- 0 0 0 0 0 0 0 0 0 0 0 0
62152- 0 0 0 0 0 0 0 0 0 0 0 0
62153- 0 0 0 0 0 0 0 0 0 14 14 14
62154- 42 42 42 86 86 86 6 6 6 116 116 116
62155-106 106 106 6 6 6 70 70 70 149 149 149
62156-128 128 128 18 18 18 38 38 38 54 54 54
62157-221 221 221 106 106 106 2 2 6 14 14 14
62158- 46 46 46 190 190 190 198 198 198 2 2 6
62159- 2 2 6 2 2 6 2 2 6 2 2 6
62160- 74 74 74 62 62 62 22 22 22 6 6 6
62161- 0 0 0 0 0 0 0 0 0 0 0 0
62162- 0 0 0 0 0 0 0 0 0 0 0 0
62163- 0 0 0 0 0 0 0 0 0 0 0 0
62164- 0 0 0 0 0 0 0 0 0 0 0 0
62165- 0 0 0 0 0 0 0 0 0 0 0 0
62166- 0 0 0 0 0 0 0 0 0 0 0 0
62167- 0 0 0 0 0 0 0 0 1 0 0 0
62168- 0 0 1 0 0 0 0 0 1 0 0 0
62169- 0 0 0 0 0 0 0 0 0 0 0 0
62170- 0 0 0 0 0 0 0 0 0 0 0 0
62171- 0 0 0 0 0 0 0 0 0 0 0 0
62172- 0 0 0 0 0 0 0 0 0 0 0 0
62173- 0 0 0 0 0 0 0 0 0 14 14 14
62174- 42 42 42 94 94 94 14 14 14 101 101 101
62175-128 128 128 2 2 6 18 18 18 116 116 116
62176-118 98 46 121 92 8 121 92 8 98 78 10
62177-162 162 162 106 106 106 2 2 6 2 2 6
62178- 2 2 6 195 195 195 195 195 195 6 6 6
62179- 2 2 6 2 2 6 2 2 6 2 2 6
62180- 74 74 74 62 62 62 22 22 22 6 6 6
62181- 0 0 0 0 0 0 0 0 0 0 0 0
62182- 0 0 0 0 0 0 0 0 0 0 0 0
62183- 0 0 0 0 0 0 0 0 0 0 0 0
62184- 0 0 0 0 0 0 0 0 0 0 0 0
62185- 0 0 0 0 0 0 0 0 0 0 0 0
62186- 0 0 0 0 0 0 0 0 0 0 0 0
62187- 0 0 0 0 0 0 0 0 1 0 0 1
62188- 0 0 1 0 0 0 0 0 1 0 0 0
62189- 0 0 0 0 0 0 0 0 0 0 0 0
62190- 0 0 0 0 0 0 0 0 0 0 0 0
62191- 0 0 0 0 0 0 0 0 0 0 0 0
62192- 0 0 0 0 0 0 0 0 0 0 0 0
62193- 0 0 0 0 0 0 0 0 0 10 10 10
62194- 38 38 38 90 90 90 14 14 14 58 58 58
62195-210 210 210 26 26 26 54 38 6 154 114 10
62196-226 170 11 236 186 11 225 175 15 184 144 12
62197-215 174 15 175 146 61 37 26 9 2 2 6
62198- 70 70 70 246 246 246 138 138 138 2 2 6
62199- 2 2 6 2 2 6 2 2 6 2 2 6
62200- 70 70 70 66 66 66 26 26 26 6 6 6
62201- 0 0 0 0 0 0 0 0 0 0 0 0
62202- 0 0 0 0 0 0 0 0 0 0 0 0
62203- 0 0 0 0 0 0 0 0 0 0 0 0
62204- 0 0 0 0 0 0 0 0 0 0 0 0
62205- 0 0 0 0 0 0 0 0 0 0 0 0
62206- 0 0 0 0 0 0 0 0 0 0 0 0
62207- 0 0 0 0 0 0 0 0 0 0 0 0
62208- 0 0 0 0 0 0 0 0 0 0 0 0
62209- 0 0 0 0 0 0 0 0 0 0 0 0
62210- 0 0 0 0 0 0 0 0 0 0 0 0
62211- 0 0 0 0 0 0 0 0 0 0 0 0
62212- 0 0 0 0 0 0 0 0 0 0 0 0
62213- 0 0 0 0 0 0 0 0 0 10 10 10
62214- 38 38 38 86 86 86 14 14 14 10 10 10
62215-195 195 195 188 164 115 192 133 9 225 175 15
62216-239 182 13 234 190 10 232 195 16 232 200 30
62217-245 207 45 241 208 19 232 195 16 184 144 12
62218-218 194 134 211 206 186 42 42 42 2 2 6
62219- 2 2 6 2 2 6 2 2 6 2 2 6
62220- 50 50 50 74 74 74 30 30 30 6 6 6
62221- 0 0 0 0 0 0 0 0 0 0 0 0
62222- 0 0 0 0 0 0 0 0 0 0 0 0
62223- 0 0 0 0 0 0 0 0 0 0 0 0
62224- 0 0 0 0 0 0 0 0 0 0 0 0
62225- 0 0 0 0 0 0 0 0 0 0 0 0
62226- 0 0 0 0 0 0 0 0 0 0 0 0
62227- 0 0 0 0 0 0 0 0 0 0 0 0
62228- 0 0 0 0 0 0 0 0 0 0 0 0
62229- 0 0 0 0 0 0 0 0 0 0 0 0
62230- 0 0 0 0 0 0 0 0 0 0 0 0
62231- 0 0 0 0 0 0 0 0 0 0 0 0
62232- 0 0 0 0 0 0 0 0 0 0 0 0
62233- 0 0 0 0 0 0 0 0 0 10 10 10
62234- 34 34 34 86 86 86 14 14 14 2 2 6
62235-121 87 25 192 133 9 219 162 10 239 182 13
62236-236 186 11 232 195 16 241 208 19 244 214 54
62237-246 218 60 246 218 38 246 215 20 241 208 19
62238-241 208 19 226 184 13 121 87 25 2 2 6
62239- 2 2 6 2 2 6 2 2 6 2 2 6
62240- 50 50 50 82 82 82 34 34 34 10 10 10
62241- 0 0 0 0 0 0 0 0 0 0 0 0
62242- 0 0 0 0 0 0 0 0 0 0 0 0
62243- 0 0 0 0 0 0 0 0 0 0 0 0
62244- 0 0 0 0 0 0 0 0 0 0 0 0
62245- 0 0 0 0 0 0 0 0 0 0 0 0
62246- 0 0 0 0 0 0 0 0 0 0 0 0
62247- 0 0 0 0 0 0 0 0 0 0 0 0
62248- 0 0 0 0 0 0 0 0 0 0 0 0
62249- 0 0 0 0 0 0 0 0 0 0 0 0
62250- 0 0 0 0 0 0 0 0 0 0 0 0
62251- 0 0 0 0 0 0 0 0 0 0 0 0
62252- 0 0 0 0 0 0 0 0 0 0 0 0
62253- 0 0 0 0 0 0 0 0 0 10 10 10
62254- 34 34 34 82 82 82 30 30 30 61 42 6
62255-180 123 7 206 145 10 230 174 11 239 182 13
62256-234 190 10 238 202 15 241 208 19 246 218 74
62257-246 218 38 246 215 20 246 215 20 246 215 20
62258-226 184 13 215 174 15 184 144 12 6 6 6
62259- 2 2 6 2 2 6 2 2 6 2 2 6
62260- 26 26 26 94 94 94 42 42 42 14 14 14
62261- 0 0 0 0 0 0 0 0 0 0 0 0
62262- 0 0 0 0 0 0 0 0 0 0 0 0
62263- 0 0 0 0 0 0 0 0 0 0 0 0
62264- 0 0 0 0 0 0 0 0 0 0 0 0
62265- 0 0 0 0 0 0 0 0 0 0 0 0
62266- 0 0 0 0 0 0 0 0 0 0 0 0
62267- 0 0 0 0 0 0 0 0 0 0 0 0
62268- 0 0 0 0 0 0 0 0 0 0 0 0
62269- 0 0 0 0 0 0 0 0 0 0 0 0
62270- 0 0 0 0 0 0 0 0 0 0 0 0
62271- 0 0 0 0 0 0 0 0 0 0 0 0
62272- 0 0 0 0 0 0 0 0 0 0 0 0
62273- 0 0 0 0 0 0 0 0 0 10 10 10
62274- 30 30 30 78 78 78 50 50 50 104 69 6
62275-192 133 9 216 158 10 236 178 12 236 186 11
62276-232 195 16 241 208 19 244 214 54 245 215 43
62277-246 215 20 246 215 20 241 208 19 198 155 10
62278-200 144 11 216 158 10 156 118 10 2 2 6
62279- 2 2 6 2 2 6 2 2 6 2 2 6
62280- 6 6 6 90 90 90 54 54 54 18 18 18
62281- 6 6 6 0 0 0 0 0 0 0 0 0
62282- 0 0 0 0 0 0 0 0 0 0 0 0
62283- 0 0 0 0 0 0 0 0 0 0 0 0
62284- 0 0 0 0 0 0 0 0 0 0 0 0
62285- 0 0 0 0 0 0 0 0 0 0 0 0
62286- 0 0 0 0 0 0 0 0 0 0 0 0
62287- 0 0 0 0 0 0 0 0 0 0 0 0
62288- 0 0 0 0 0 0 0 0 0 0 0 0
62289- 0 0 0 0 0 0 0 0 0 0 0 0
62290- 0 0 0 0 0 0 0 0 0 0 0 0
62291- 0 0 0 0 0 0 0 0 0 0 0 0
62292- 0 0 0 0 0 0 0 0 0 0 0 0
62293- 0 0 0 0 0 0 0 0 0 10 10 10
62294- 30 30 30 78 78 78 46 46 46 22 22 22
62295-137 92 6 210 162 10 239 182 13 238 190 10
62296-238 202 15 241 208 19 246 215 20 246 215 20
62297-241 208 19 203 166 17 185 133 11 210 150 10
62298-216 158 10 210 150 10 102 78 10 2 2 6
62299- 6 6 6 54 54 54 14 14 14 2 2 6
62300- 2 2 6 62 62 62 74 74 74 30 30 30
62301- 10 10 10 0 0 0 0 0 0 0 0 0
62302- 0 0 0 0 0 0 0 0 0 0 0 0
62303- 0 0 0 0 0 0 0 0 0 0 0 0
62304- 0 0 0 0 0 0 0 0 0 0 0 0
62305- 0 0 0 0 0 0 0 0 0 0 0 0
62306- 0 0 0 0 0 0 0 0 0 0 0 0
62307- 0 0 0 0 0 0 0 0 0 0 0 0
62308- 0 0 0 0 0 0 0 0 0 0 0 0
62309- 0 0 0 0 0 0 0 0 0 0 0 0
62310- 0 0 0 0 0 0 0 0 0 0 0 0
62311- 0 0 0 0 0 0 0 0 0 0 0 0
62312- 0 0 0 0 0 0 0 0 0 0 0 0
62313- 0 0 0 0 0 0 0 0 0 10 10 10
62314- 34 34 34 78 78 78 50 50 50 6 6 6
62315- 94 70 30 139 102 15 190 146 13 226 184 13
62316-232 200 30 232 195 16 215 174 15 190 146 13
62317-168 122 10 192 133 9 210 150 10 213 154 11
62318-202 150 34 182 157 106 101 98 89 2 2 6
62319- 2 2 6 78 78 78 116 116 116 58 58 58
62320- 2 2 6 22 22 22 90 90 90 46 46 46
62321- 18 18 18 6 6 6 0 0 0 0 0 0
62322- 0 0 0 0 0 0 0 0 0 0 0 0
62323- 0 0 0 0 0 0 0 0 0 0 0 0
62324- 0 0 0 0 0 0 0 0 0 0 0 0
62325- 0 0 0 0 0 0 0 0 0 0 0 0
62326- 0 0 0 0 0 0 0 0 0 0 0 0
62327- 0 0 0 0 0 0 0 0 0 0 0 0
62328- 0 0 0 0 0 0 0 0 0 0 0 0
62329- 0 0 0 0 0 0 0 0 0 0 0 0
62330- 0 0 0 0 0 0 0 0 0 0 0 0
62331- 0 0 0 0 0 0 0 0 0 0 0 0
62332- 0 0 0 0 0 0 0 0 0 0 0 0
62333- 0 0 0 0 0 0 0 0 0 10 10 10
62334- 38 38 38 86 86 86 50 50 50 6 6 6
62335-128 128 128 174 154 114 156 107 11 168 122 10
62336-198 155 10 184 144 12 197 138 11 200 144 11
62337-206 145 10 206 145 10 197 138 11 188 164 115
62338-195 195 195 198 198 198 174 174 174 14 14 14
62339- 2 2 6 22 22 22 116 116 116 116 116 116
62340- 22 22 22 2 2 6 74 74 74 70 70 70
62341- 30 30 30 10 10 10 0 0 0 0 0 0
62342- 0 0 0 0 0 0 0 0 0 0 0 0
62343- 0 0 0 0 0 0 0 0 0 0 0 0
62344- 0 0 0 0 0 0 0 0 0 0 0 0
62345- 0 0 0 0 0 0 0 0 0 0 0 0
62346- 0 0 0 0 0 0 0 0 0 0 0 0
62347- 0 0 0 0 0 0 0 0 0 0 0 0
62348- 0 0 0 0 0 0 0 0 0 0 0 0
62349- 0 0 0 0 0 0 0 0 0 0 0 0
62350- 0 0 0 0 0 0 0 0 0 0 0 0
62351- 0 0 0 0 0 0 0 0 0 0 0 0
62352- 0 0 0 0 0 0 0 0 0 0 0 0
62353- 0 0 0 0 0 0 6 6 6 18 18 18
62354- 50 50 50 101 101 101 26 26 26 10 10 10
62355-138 138 138 190 190 190 174 154 114 156 107 11
62356-197 138 11 200 144 11 197 138 11 192 133 9
62357-180 123 7 190 142 34 190 178 144 187 187 187
62358-202 202 202 221 221 221 214 214 214 66 66 66
62359- 2 2 6 2 2 6 50 50 50 62 62 62
62360- 6 6 6 2 2 6 10 10 10 90 90 90
62361- 50 50 50 18 18 18 6 6 6 0 0 0
62362- 0 0 0 0 0 0 0 0 0 0 0 0
62363- 0 0 0 0 0 0 0 0 0 0 0 0
62364- 0 0 0 0 0 0 0 0 0 0 0 0
62365- 0 0 0 0 0 0 0 0 0 0 0 0
62366- 0 0 0 0 0 0 0 0 0 0 0 0
62367- 0 0 0 0 0 0 0 0 0 0 0 0
62368- 0 0 0 0 0 0 0 0 0 0 0 0
62369- 0 0 0 0 0 0 0 0 0 0 0 0
62370- 0 0 0 0 0 0 0 0 0 0 0 0
62371- 0 0 0 0 0 0 0 0 0 0 0 0
62372- 0 0 0 0 0 0 0 0 0 0 0 0
62373- 0 0 0 0 0 0 10 10 10 34 34 34
62374- 74 74 74 74 74 74 2 2 6 6 6 6
62375-144 144 144 198 198 198 190 190 190 178 166 146
62376-154 121 60 156 107 11 156 107 11 168 124 44
62377-174 154 114 187 187 187 190 190 190 210 210 210
62378-246 246 246 253 253 253 253 253 253 182 182 182
62379- 6 6 6 2 2 6 2 2 6 2 2 6
62380- 2 2 6 2 2 6 2 2 6 62 62 62
62381- 74 74 74 34 34 34 14 14 14 0 0 0
62382- 0 0 0 0 0 0 0 0 0 0 0 0
62383- 0 0 0 0 0 0 0 0 0 0 0 0
62384- 0 0 0 0 0 0 0 0 0 0 0 0
62385- 0 0 0 0 0 0 0 0 0 0 0 0
62386- 0 0 0 0 0 0 0 0 0 0 0 0
62387- 0 0 0 0 0 0 0 0 0 0 0 0
62388- 0 0 0 0 0 0 0 0 0 0 0 0
62389- 0 0 0 0 0 0 0 0 0 0 0 0
62390- 0 0 0 0 0 0 0 0 0 0 0 0
62391- 0 0 0 0 0 0 0 0 0 0 0 0
62392- 0 0 0 0 0 0 0 0 0 0 0 0
62393- 0 0 0 10 10 10 22 22 22 54 54 54
62394- 94 94 94 18 18 18 2 2 6 46 46 46
62395-234 234 234 221 221 221 190 190 190 190 190 190
62396-190 190 190 187 187 187 187 187 187 190 190 190
62397-190 190 190 195 195 195 214 214 214 242 242 242
62398-253 253 253 253 253 253 253 253 253 253 253 253
62399- 82 82 82 2 2 6 2 2 6 2 2 6
62400- 2 2 6 2 2 6 2 2 6 14 14 14
62401- 86 86 86 54 54 54 22 22 22 6 6 6
62402- 0 0 0 0 0 0 0 0 0 0 0 0
62403- 0 0 0 0 0 0 0 0 0 0 0 0
62404- 0 0 0 0 0 0 0 0 0 0 0 0
62405- 0 0 0 0 0 0 0 0 0 0 0 0
62406- 0 0 0 0 0 0 0 0 0 0 0 0
62407- 0 0 0 0 0 0 0 0 0 0 0 0
62408- 0 0 0 0 0 0 0 0 0 0 0 0
62409- 0 0 0 0 0 0 0 0 0 0 0 0
62410- 0 0 0 0 0 0 0 0 0 0 0 0
62411- 0 0 0 0 0 0 0 0 0 0 0 0
62412- 0 0 0 0 0 0 0 0 0 0 0 0
62413- 6 6 6 18 18 18 46 46 46 90 90 90
62414- 46 46 46 18 18 18 6 6 6 182 182 182
62415-253 253 253 246 246 246 206 206 206 190 190 190
62416-190 190 190 190 190 190 190 190 190 190 190 190
62417-206 206 206 231 231 231 250 250 250 253 253 253
62418-253 253 253 253 253 253 253 253 253 253 253 253
62419-202 202 202 14 14 14 2 2 6 2 2 6
62420- 2 2 6 2 2 6 2 2 6 2 2 6
62421- 42 42 42 86 86 86 42 42 42 18 18 18
62422- 6 6 6 0 0 0 0 0 0 0 0 0
62423- 0 0 0 0 0 0 0 0 0 0 0 0
62424- 0 0 0 0 0 0 0 0 0 0 0 0
62425- 0 0 0 0 0 0 0 0 0 0 0 0
62426- 0 0 0 0 0 0 0 0 0 0 0 0
62427- 0 0 0 0 0 0 0 0 0 0 0 0
62428- 0 0 0 0 0 0 0 0 0 0 0 0
62429- 0 0 0 0 0 0 0 0 0 0 0 0
62430- 0 0 0 0 0 0 0 0 0 0 0 0
62431- 0 0 0 0 0 0 0 0 0 0 0 0
62432- 0 0 0 0 0 0 0 0 0 6 6 6
62433- 14 14 14 38 38 38 74 74 74 66 66 66
62434- 2 2 6 6 6 6 90 90 90 250 250 250
62435-253 253 253 253 253 253 238 238 238 198 198 198
62436-190 190 190 190 190 190 195 195 195 221 221 221
62437-246 246 246 253 253 253 253 253 253 253 253 253
62438-253 253 253 253 253 253 253 253 253 253 253 253
62439-253 253 253 82 82 82 2 2 6 2 2 6
62440- 2 2 6 2 2 6 2 2 6 2 2 6
62441- 2 2 6 78 78 78 70 70 70 34 34 34
62442- 14 14 14 6 6 6 0 0 0 0 0 0
62443- 0 0 0 0 0 0 0 0 0 0 0 0
62444- 0 0 0 0 0 0 0 0 0 0 0 0
62445- 0 0 0 0 0 0 0 0 0 0 0 0
62446- 0 0 0 0 0 0 0 0 0 0 0 0
62447- 0 0 0 0 0 0 0 0 0 0 0 0
62448- 0 0 0 0 0 0 0 0 0 0 0 0
62449- 0 0 0 0 0 0 0 0 0 0 0 0
62450- 0 0 0 0 0 0 0 0 0 0 0 0
62451- 0 0 0 0 0 0 0 0 0 0 0 0
62452- 0 0 0 0 0 0 0 0 0 14 14 14
62453- 34 34 34 66 66 66 78 78 78 6 6 6
62454- 2 2 6 18 18 18 218 218 218 253 253 253
62455-253 253 253 253 253 253 253 253 253 246 246 246
62456-226 226 226 231 231 231 246 246 246 253 253 253
62457-253 253 253 253 253 253 253 253 253 253 253 253
62458-253 253 253 253 253 253 253 253 253 253 253 253
62459-253 253 253 178 178 178 2 2 6 2 2 6
62460- 2 2 6 2 2 6 2 2 6 2 2 6
62461- 2 2 6 18 18 18 90 90 90 62 62 62
62462- 30 30 30 10 10 10 0 0 0 0 0 0
62463- 0 0 0 0 0 0 0 0 0 0 0 0
62464- 0 0 0 0 0 0 0 0 0 0 0 0
62465- 0 0 0 0 0 0 0 0 0 0 0 0
62466- 0 0 0 0 0 0 0 0 0 0 0 0
62467- 0 0 0 0 0 0 0 0 0 0 0 0
62468- 0 0 0 0 0 0 0 0 0 0 0 0
62469- 0 0 0 0 0 0 0 0 0 0 0 0
62470- 0 0 0 0 0 0 0 0 0 0 0 0
62471- 0 0 0 0 0 0 0 0 0 0 0 0
62472- 0 0 0 0 0 0 10 10 10 26 26 26
62473- 58 58 58 90 90 90 18 18 18 2 2 6
62474- 2 2 6 110 110 110 253 253 253 253 253 253
62475-253 253 253 253 253 253 253 253 253 253 253 253
62476-250 250 250 253 253 253 253 253 253 253 253 253
62477-253 253 253 253 253 253 253 253 253 253 253 253
62478-253 253 253 253 253 253 253 253 253 253 253 253
62479-253 253 253 231 231 231 18 18 18 2 2 6
62480- 2 2 6 2 2 6 2 2 6 2 2 6
62481- 2 2 6 2 2 6 18 18 18 94 94 94
62482- 54 54 54 26 26 26 10 10 10 0 0 0
62483- 0 0 0 0 0 0 0 0 0 0 0 0
62484- 0 0 0 0 0 0 0 0 0 0 0 0
62485- 0 0 0 0 0 0 0 0 0 0 0 0
62486- 0 0 0 0 0 0 0 0 0 0 0 0
62487- 0 0 0 0 0 0 0 0 0 0 0 0
62488- 0 0 0 0 0 0 0 0 0 0 0 0
62489- 0 0 0 0 0 0 0 0 0 0 0 0
62490- 0 0 0 0 0 0 0 0 0 0 0 0
62491- 0 0 0 0 0 0 0 0 0 0 0 0
62492- 0 0 0 6 6 6 22 22 22 50 50 50
62493- 90 90 90 26 26 26 2 2 6 2 2 6
62494- 14 14 14 195 195 195 250 250 250 253 253 253
62495-253 253 253 253 253 253 253 253 253 253 253 253
62496-253 253 253 253 253 253 253 253 253 253 253 253
62497-253 253 253 253 253 253 253 253 253 253 253 253
62498-253 253 253 253 253 253 253 253 253 253 253 253
62499-250 250 250 242 242 242 54 54 54 2 2 6
62500- 2 2 6 2 2 6 2 2 6 2 2 6
62501- 2 2 6 2 2 6 2 2 6 38 38 38
62502- 86 86 86 50 50 50 22 22 22 6 6 6
62503- 0 0 0 0 0 0 0 0 0 0 0 0
62504- 0 0 0 0 0 0 0 0 0 0 0 0
62505- 0 0 0 0 0 0 0 0 0 0 0 0
62506- 0 0 0 0 0 0 0 0 0 0 0 0
62507- 0 0 0 0 0 0 0 0 0 0 0 0
62508- 0 0 0 0 0 0 0 0 0 0 0 0
62509- 0 0 0 0 0 0 0 0 0 0 0 0
62510- 0 0 0 0 0 0 0 0 0 0 0 0
62511- 0 0 0 0 0 0 0 0 0 0 0 0
62512- 6 6 6 14 14 14 38 38 38 82 82 82
62513- 34 34 34 2 2 6 2 2 6 2 2 6
62514- 42 42 42 195 195 195 246 246 246 253 253 253
62515-253 253 253 253 253 253 253 253 253 250 250 250
62516-242 242 242 242 242 242 250 250 250 253 253 253
62517-253 253 253 253 253 253 253 253 253 253 253 253
62518-253 253 253 250 250 250 246 246 246 238 238 238
62519-226 226 226 231 231 231 101 101 101 6 6 6
62520- 2 2 6 2 2 6 2 2 6 2 2 6
62521- 2 2 6 2 2 6 2 2 6 2 2 6
62522- 38 38 38 82 82 82 42 42 42 14 14 14
62523- 6 6 6 0 0 0 0 0 0 0 0 0
62524- 0 0 0 0 0 0 0 0 0 0 0 0
62525- 0 0 0 0 0 0 0 0 0 0 0 0
62526- 0 0 0 0 0 0 0 0 0 0 0 0
62527- 0 0 0 0 0 0 0 0 0 0 0 0
62528- 0 0 0 0 0 0 0 0 0 0 0 0
62529- 0 0 0 0 0 0 0 0 0 0 0 0
62530- 0 0 0 0 0 0 0 0 0 0 0 0
62531- 0 0 0 0 0 0 0 0 0 0 0 0
62532- 10 10 10 26 26 26 62 62 62 66 66 66
62533- 2 2 6 2 2 6 2 2 6 6 6 6
62534- 70 70 70 170 170 170 206 206 206 234 234 234
62535-246 246 246 250 250 250 250 250 250 238 238 238
62536-226 226 226 231 231 231 238 238 238 250 250 250
62537-250 250 250 250 250 250 246 246 246 231 231 231
62538-214 214 214 206 206 206 202 202 202 202 202 202
62539-198 198 198 202 202 202 182 182 182 18 18 18
62540- 2 2 6 2 2 6 2 2 6 2 2 6
62541- 2 2 6 2 2 6 2 2 6 2 2 6
62542- 2 2 6 62 62 62 66 66 66 30 30 30
62543- 10 10 10 0 0 0 0 0 0 0 0 0
62544- 0 0 0 0 0 0 0 0 0 0 0 0
62545- 0 0 0 0 0 0 0 0 0 0 0 0
62546- 0 0 0 0 0 0 0 0 0 0 0 0
62547- 0 0 0 0 0 0 0 0 0 0 0 0
62548- 0 0 0 0 0 0 0 0 0 0 0 0
62549- 0 0 0 0 0 0 0 0 0 0 0 0
62550- 0 0 0 0 0 0 0 0 0 0 0 0
62551- 0 0 0 0 0 0 0 0 0 0 0 0
62552- 14 14 14 42 42 42 82 82 82 18 18 18
62553- 2 2 6 2 2 6 2 2 6 10 10 10
62554- 94 94 94 182 182 182 218 218 218 242 242 242
62555-250 250 250 253 253 253 253 253 253 250 250 250
62556-234 234 234 253 253 253 253 253 253 253 253 253
62557-253 253 253 253 253 253 253 253 253 246 246 246
62558-238 238 238 226 226 226 210 210 210 202 202 202
62559-195 195 195 195 195 195 210 210 210 158 158 158
62560- 6 6 6 14 14 14 50 50 50 14 14 14
62561- 2 2 6 2 2 6 2 2 6 2 2 6
62562- 2 2 6 6 6 6 86 86 86 46 46 46
62563- 18 18 18 6 6 6 0 0 0 0 0 0
62564- 0 0 0 0 0 0 0 0 0 0 0 0
62565- 0 0 0 0 0 0 0 0 0 0 0 0
62566- 0 0 0 0 0 0 0 0 0 0 0 0
62567- 0 0 0 0 0 0 0 0 0 0 0 0
62568- 0 0 0 0 0 0 0 0 0 0 0 0
62569- 0 0 0 0 0 0 0 0 0 0 0 0
62570- 0 0 0 0 0 0 0 0 0 0 0 0
62571- 0 0 0 0 0 0 0 0 0 6 6 6
62572- 22 22 22 54 54 54 70 70 70 2 2 6
62573- 2 2 6 10 10 10 2 2 6 22 22 22
62574-166 166 166 231 231 231 250 250 250 253 253 253
62575-253 253 253 253 253 253 253 253 253 250 250 250
62576-242 242 242 253 253 253 253 253 253 253 253 253
62577-253 253 253 253 253 253 253 253 253 253 253 253
62578-253 253 253 253 253 253 253 253 253 246 246 246
62579-231 231 231 206 206 206 198 198 198 226 226 226
62580- 94 94 94 2 2 6 6 6 6 38 38 38
62581- 30 30 30 2 2 6 2 2 6 2 2 6
62582- 2 2 6 2 2 6 62 62 62 66 66 66
62583- 26 26 26 10 10 10 0 0 0 0 0 0
62584- 0 0 0 0 0 0 0 0 0 0 0 0
62585- 0 0 0 0 0 0 0 0 0 0 0 0
62586- 0 0 0 0 0 0 0 0 0 0 0 0
62587- 0 0 0 0 0 0 0 0 0 0 0 0
62588- 0 0 0 0 0 0 0 0 0 0 0 0
62589- 0 0 0 0 0 0 0 0 0 0 0 0
62590- 0 0 0 0 0 0 0 0 0 0 0 0
62591- 0 0 0 0 0 0 0 0 0 10 10 10
62592- 30 30 30 74 74 74 50 50 50 2 2 6
62593- 26 26 26 26 26 26 2 2 6 106 106 106
62594-238 238 238 253 253 253 253 253 253 253 253 253
62595-253 253 253 253 253 253 253 253 253 253 253 253
62596-253 253 253 253 253 253 253 253 253 253 253 253
62597-253 253 253 253 253 253 253 253 253 253 253 253
62598-253 253 253 253 253 253 253 253 253 253 253 253
62599-253 253 253 246 246 246 218 218 218 202 202 202
62600-210 210 210 14 14 14 2 2 6 2 2 6
62601- 30 30 30 22 22 22 2 2 6 2 2 6
62602- 2 2 6 2 2 6 18 18 18 86 86 86
62603- 42 42 42 14 14 14 0 0 0 0 0 0
62604- 0 0 0 0 0 0 0 0 0 0 0 0
62605- 0 0 0 0 0 0 0 0 0 0 0 0
62606- 0 0 0 0 0 0 0 0 0 0 0 0
62607- 0 0 0 0 0 0 0 0 0 0 0 0
62608- 0 0 0 0 0 0 0 0 0 0 0 0
62609- 0 0 0 0 0 0 0 0 0 0 0 0
62610- 0 0 0 0 0 0 0 0 0 0 0 0
62611- 0 0 0 0 0 0 0 0 0 14 14 14
62612- 42 42 42 90 90 90 22 22 22 2 2 6
62613- 42 42 42 2 2 6 18 18 18 218 218 218
62614-253 253 253 253 253 253 253 253 253 253 253 253
62615-253 253 253 253 253 253 253 253 253 253 253 253
62616-253 253 253 253 253 253 253 253 253 253 253 253
62617-253 253 253 253 253 253 253 253 253 253 253 253
62618-253 253 253 253 253 253 253 253 253 253 253 253
62619-253 253 253 253 253 253 250 250 250 221 221 221
62620-218 218 218 101 101 101 2 2 6 14 14 14
62621- 18 18 18 38 38 38 10 10 10 2 2 6
62622- 2 2 6 2 2 6 2 2 6 78 78 78
62623- 58 58 58 22 22 22 6 6 6 0 0 0
62624- 0 0 0 0 0 0 0 0 0 0 0 0
62625- 0 0 0 0 0 0 0 0 0 0 0 0
62626- 0 0 0 0 0 0 0 0 0 0 0 0
62627- 0 0 0 0 0 0 0 0 0 0 0 0
62628- 0 0 0 0 0 0 0 0 0 0 0 0
62629- 0 0 0 0 0 0 0 0 0 0 0 0
62630- 0 0 0 0 0 0 0 0 0 0 0 0
62631- 0 0 0 0 0 0 6 6 6 18 18 18
62632- 54 54 54 82 82 82 2 2 6 26 26 26
62633- 22 22 22 2 2 6 123 123 123 253 253 253
62634-253 253 253 253 253 253 253 253 253 253 253 253
62635-253 253 253 253 253 253 253 253 253 253 253 253
62636-253 253 253 253 253 253 253 253 253 253 253 253
62637-253 253 253 253 253 253 253 253 253 253 253 253
62638-253 253 253 253 253 253 253 253 253 253 253 253
62639-253 253 253 253 253 253 253 253 253 250 250 250
62640-238 238 238 198 198 198 6 6 6 38 38 38
62641- 58 58 58 26 26 26 38 38 38 2 2 6
62642- 2 2 6 2 2 6 2 2 6 46 46 46
62643- 78 78 78 30 30 30 10 10 10 0 0 0
62644- 0 0 0 0 0 0 0 0 0 0 0 0
62645- 0 0 0 0 0 0 0 0 0 0 0 0
62646- 0 0 0 0 0 0 0 0 0 0 0 0
62647- 0 0 0 0 0 0 0 0 0 0 0 0
62648- 0 0 0 0 0 0 0 0 0 0 0 0
62649- 0 0 0 0 0 0 0 0 0 0 0 0
62650- 0 0 0 0 0 0 0 0 0 0 0 0
62651- 0 0 0 0 0 0 10 10 10 30 30 30
62652- 74 74 74 58 58 58 2 2 6 42 42 42
62653- 2 2 6 22 22 22 231 231 231 253 253 253
62654-253 253 253 253 253 253 253 253 253 253 253 253
62655-253 253 253 253 253 253 253 253 253 250 250 250
62656-253 253 253 253 253 253 253 253 253 253 253 253
62657-253 253 253 253 253 253 253 253 253 253 253 253
62658-253 253 253 253 253 253 253 253 253 253 253 253
62659-253 253 253 253 253 253 253 253 253 253 253 253
62660-253 253 253 246 246 246 46 46 46 38 38 38
62661- 42 42 42 14 14 14 38 38 38 14 14 14
62662- 2 2 6 2 2 6 2 2 6 6 6 6
62663- 86 86 86 46 46 46 14 14 14 0 0 0
62664- 0 0 0 0 0 0 0 0 0 0 0 0
62665- 0 0 0 0 0 0 0 0 0 0 0 0
62666- 0 0 0 0 0 0 0 0 0 0 0 0
62667- 0 0 0 0 0 0 0 0 0 0 0 0
62668- 0 0 0 0 0 0 0 0 0 0 0 0
62669- 0 0 0 0 0 0 0 0 0 0 0 0
62670- 0 0 0 0 0 0 0 0 0 0 0 0
62671- 0 0 0 6 6 6 14 14 14 42 42 42
62672- 90 90 90 18 18 18 18 18 18 26 26 26
62673- 2 2 6 116 116 116 253 253 253 253 253 253
62674-253 253 253 253 253 253 253 253 253 253 253 253
62675-253 253 253 253 253 253 250 250 250 238 238 238
62676-253 253 253 253 253 253 253 253 253 253 253 253
62677-253 253 253 253 253 253 253 253 253 253 253 253
62678-253 253 253 253 253 253 253 253 253 253 253 253
62679-253 253 253 253 253 253 253 253 253 253 253 253
62680-253 253 253 253 253 253 94 94 94 6 6 6
62681- 2 2 6 2 2 6 10 10 10 34 34 34
62682- 2 2 6 2 2 6 2 2 6 2 2 6
62683- 74 74 74 58 58 58 22 22 22 6 6 6
62684- 0 0 0 0 0 0 0 0 0 0 0 0
62685- 0 0 0 0 0 0 0 0 0 0 0 0
62686- 0 0 0 0 0 0 0 0 0 0 0 0
62687- 0 0 0 0 0 0 0 0 0 0 0 0
62688- 0 0 0 0 0 0 0 0 0 0 0 0
62689- 0 0 0 0 0 0 0 0 0 0 0 0
62690- 0 0 0 0 0 0 0 0 0 0 0 0
62691- 0 0 0 10 10 10 26 26 26 66 66 66
62692- 82 82 82 2 2 6 38 38 38 6 6 6
62693- 14 14 14 210 210 210 253 253 253 253 253 253
62694-253 253 253 253 253 253 253 253 253 253 253 253
62695-253 253 253 253 253 253 246 246 246 242 242 242
62696-253 253 253 253 253 253 253 253 253 253 253 253
62697-253 253 253 253 253 253 253 253 253 253 253 253
62698-253 253 253 253 253 253 253 253 253 253 253 253
62699-253 253 253 253 253 253 253 253 253 253 253 253
62700-253 253 253 253 253 253 144 144 144 2 2 6
62701- 2 2 6 2 2 6 2 2 6 46 46 46
62702- 2 2 6 2 2 6 2 2 6 2 2 6
62703- 42 42 42 74 74 74 30 30 30 10 10 10
62704- 0 0 0 0 0 0 0 0 0 0 0 0
62705- 0 0 0 0 0 0 0 0 0 0 0 0
62706- 0 0 0 0 0 0 0 0 0 0 0 0
62707- 0 0 0 0 0 0 0 0 0 0 0 0
62708- 0 0 0 0 0 0 0 0 0 0 0 0
62709- 0 0 0 0 0 0 0 0 0 0 0 0
62710- 0 0 0 0 0 0 0 0 0 0 0 0
62711- 6 6 6 14 14 14 42 42 42 90 90 90
62712- 26 26 26 6 6 6 42 42 42 2 2 6
62713- 74 74 74 250 250 250 253 253 253 253 253 253
62714-253 253 253 253 253 253 253 253 253 253 253 253
62715-253 253 253 253 253 253 242 242 242 242 242 242
62716-253 253 253 253 253 253 253 253 253 253 253 253
62717-253 253 253 253 253 253 253 253 253 253 253 253
62718-253 253 253 253 253 253 253 253 253 253 253 253
62719-253 253 253 253 253 253 253 253 253 253 253 253
62720-253 253 253 253 253 253 182 182 182 2 2 6
62721- 2 2 6 2 2 6 2 2 6 46 46 46
62722- 2 2 6 2 2 6 2 2 6 2 2 6
62723- 10 10 10 86 86 86 38 38 38 10 10 10
62724- 0 0 0 0 0 0 0 0 0 0 0 0
62725- 0 0 0 0 0 0 0 0 0 0 0 0
62726- 0 0 0 0 0 0 0 0 0 0 0 0
62727- 0 0 0 0 0 0 0 0 0 0 0 0
62728- 0 0 0 0 0 0 0 0 0 0 0 0
62729- 0 0 0 0 0 0 0 0 0 0 0 0
62730- 0 0 0 0 0 0 0 0 0 0 0 0
62731- 10 10 10 26 26 26 66 66 66 82 82 82
62732- 2 2 6 22 22 22 18 18 18 2 2 6
62733-149 149 149 253 253 253 253 253 253 253 253 253
62734-253 253 253 253 253 253 253 253 253 253 253 253
62735-253 253 253 253 253 253 234 234 234 242 242 242
62736-253 253 253 253 253 253 253 253 253 253 253 253
62737-253 253 253 253 253 253 253 253 253 253 253 253
62738-253 253 253 253 253 253 253 253 253 253 253 253
62739-253 253 253 253 253 253 253 253 253 253 253 253
62740-253 253 253 253 253 253 206 206 206 2 2 6
62741- 2 2 6 2 2 6 2 2 6 38 38 38
62742- 2 2 6 2 2 6 2 2 6 2 2 6
62743- 6 6 6 86 86 86 46 46 46 14 14 14
62744- 0 0 0 0 0 0 0 0 0 0 0 0
62745- 0 0 0 0 0 0 0 0 0 0 0 0
62746- 0 0 0 0 0 0 0 0 0 0 0 0
62747- 0 0 0 0 0 0 0 0 0 0 0 0
62748- 0 0 0 0 0 0 0 0 0 0 0 0
62749- 0 0 0 0 0 0 0 0 0 0 0 0
62750- 0 0 0 0 0 0 0 0 0 6 6 6
62751- 18 18 18 46 46 46 86 86 86 18 18 18
62752- 2 2 6 34 34 34 10 10 10 6 6 6
62753-210 210 210 253 253 253 253 253 253 253 253 253
62754-253 253 253 253 253 253 253 253 253 253 253 253
62755-253 253 253 253 253 253 234 234 234 242 242 242
62756-253 253 253 253 253 253 253 253 253 253 253 253
62757-253 253 253 253 253 253 253 253 253 253 253 253
62758-253 253 253 253 253 253 253 253 253 253 253 253
62759-253 253 253 253 253 253 253 253 253 253 253 253
62760-253 253 253 253 253 253 221 221 221 6 6 6
62761- 2 2 6 2 2 6 6 6 6 30 30 30
62762- 2 2 6 2 2 6 2 2 6 2 2 6
62763- 2 2 6 82 82 82 54 54 54 18 18 18
62764- 6 6 6 0 0 0 0 0 0 0 0 0
62765- 0 0 0 0 0 0 0 0 0 0 0 0
62766- 0 0 0 0 0 0 0 0 0 0 0 0
62767- 0 0 0 0 0 0 0 0 0 0 0 0
62768- 0 0 0 0 0 0 0 0 0 0 0 0
62769- 0 0 0 0 0 0 0 0 0 0 0 0
62770- 0 0 0 0 0 0 0 0 0 10 10 10
62771- 26 26 26 66 66 66 62 62 62 2 2 6
62772- 2 2 6 38 38 38 10 10 10 26 26 26
62773-238 238 238 253 253 253 253 253 253 253 253 253
62774-253 253 253 253 253 253 253 253 253 253 253 253
62775-253 253 253 253 253 253 231 231 231 238 238 238
62776-253 253 253 253 253 253 253 253 253 253 253 253
62777-253 253 253 253 253 253 253 253 253 253 253 253
62778-253 253 253 253 253 253 253 253 253 253 253 253
62779-253 253 253 253 253 253 253 253 253 253 253 253
62780-253 253 253 253 253 253 231 231 231 6 6 6
62781- 2 2 6 2 2 6 10 10 10 30 30 30
62782- 2 2 6 2 2 6 2 2 6 2 2 6
62783- 2 2 6 66 66 66 58 58 58 22 22 22
62784- 6 6 6 0 0 0 0 0 0 0 0 0
62785- 0 0 0 0 0 0 0 0 0 0 0 0
62786- 0 0 0 0 0 0 0 0 0 0 0 0
62787- 0 0 0 0 0 0 0 0 0 0 0 0
62788- 0 0 0 0 0 0 0 0 0 0 0 0
62789- 0 0 0 0 0 0 0 0 0 0 0 0
62790- 0 0 0 0 0 0 0 0 0 10 10 10
62791- 38 38 38 78 78 78 6 6 6 2 2 6
62792- 2 2 6 46 46 46 14 14 14 42 42 42
62793-246 246 246 253 253 253 253 253 253 253 253 253
62794-253 253 253 253 253 253 253 253 253 253 253 253
62795-253 253 253 253 253 253 231 231 231 242 242 242
62796-253 253 253 253 253 253 253 253 253 253 253 253
62797-253 253 253 253 253 253 253 253 253 253 253 253
62798-253 253 253 253 253 253 253 253 253 253 253 253
62799-253 253 253 253 253 253 253 253 253 253 253 253
62800-253 253 253 253 253 253 234 234 234 10 10 10
62801- 2 2 6 2 2 6 22 22 22 14 14 14
62802- 2 2 6 2 2 6 2 2 6 2 2 6
62803- 2 2 6 66 66 66 62 62 62 22 22 22
62804- 6 6 6 0 0 0 0 0 0 0 0 0
62805- 0 0 0 0 0 0 0 0 0 0 0 0
62806- 0 0 0 0 0 0 0 0 0 0 0 0
62807- 0 0 0 0 0 0 0 0 0 0 0 0
62808- 0 0 0 0 0 0 0 0 0 0 0 0
62809- 0 0 0 0 0 0 0 0 0 0 0 0
62810- 0 0 0 0 0 0 6 6 6 18 18 18
62811- 50 50 50 74 74 74 2 2 6 2 2 6
62812- 14 14 14 70 70 70 34 34 34 62 62 62
62813-250 250 250 253 253 253 253 253 253 253 253 253
62814-253 253 253 253 253 253 253 253 253 253 253 253
62815-253 253 253 253 253 253 231 231 231 246 246 246
62816-253 253 253 253 253 253 253 253 253 253 253 253
62817-253 253 253 253 253 253 253 253 253 253 253 253
62818-253 253 253 253 253 253 253 253 253 253 253 253
62819-253 253 253 253 253 253 253 253 253 253 253 253
62820-253 253 253 253 253 253 234 234 234 14 14 14
62821- 2 2 6 2 2 6 30 30 30 2 2 6
62822- 2 2 6 2 2 6 2 2 6 2 2 6
62823- 2 2 6 66 66 66 62 62 62 22 22 22
62824- 6 6 6 0 0 0 0 0 0 0 0 0
62825- 0 0 0 0 0 0 0 0 0 0 0 0
62826- 0 0 0 0 0 0 0 0 0 0 0 0
62827- 0 0 0 0 0 0 0 0 0 0 0 0
62828- 0 0 0 0 0 0 0 0 0 0 0 0
62829- 0 0 0 0 0 0 0 0 0 0 0 0
62830- 0 0 0 0 0 0 6 6 6 18 18 18
62831- 54 54 54 62 62 62 2 2 6 2 2 6
62832- 2 2 6 30 30 30 46 46 46 70 70 70
62833-250 250 250 253 253 253 253 253 253 253 253 253
62834-253 253 253 253 253 253 253 253 253 253 253 253
62835-253 253 253 253 253 253 231 231 231 246 246 246
62836-253 253 253 253 253 253 253 253 253 253 253 253
62837-253 253 253 253 253 253 253 253 253 253 253 253
62838-253 253 253 253 253 253 253 253 253 253 253 253
62839-253 253 253 253 253 253 253 253 253 253 253 253
62840-253 253 253 253 253 253 226 226 226 10 10 10
62841- 2 2 6 6 6 6 30 30 30 2 2 6
62842- 2 2 6 2 2 6 2 2 6 2 2 6
62843- 2 2 6 66 66 66 58 58 58 22 22 22
62844- 6 6 6 0 0 0 0 0 0 0 0 0
62845- 0 0 0 0 0 0 0 0 0 0 0 0
62846- 0 0 0 0 0 0 0 0 0 0 0 0
62847- 0 0 0 0 0 0 0 0 0 0 0 0
62848- 0 0 0 0 0 0 0 0 0 0 0 0
62849- 0 0 0 0 0 0 0 0 0 0 0 0
62850- 0 0 0 0 0 0 6 6 6 22 22 22
62851- 58 58 58 62 62 62 2 2 6 2 2 6
62852- 2 2 6 2 2 6 30 30 30 78 78 78
62853-250 250 250 253 253 253 253 253 253 253 253 253
62854-253 253 253 253 253 253 253 253 253 253 253 253
62855-253 253 253 253 253 253 231 231 231 246 246 246
62856-253 253 253 253 253 253 253 253 253 253 253 253
62857-253 253 253 253 253 253 253 253 253 253 253 253
62858-253 253 253 253 253 253 253 253 253 253 253 253
62859-253 253 253 253 253 253 253 253 253 253 253 253
62860-253 253 253 253 253 253 206 206 206 2 2 6
62861- 22 22 22 34 34 34 18 14 6 22 22 22
62862- 26 26 26 18 18 18 6 6 6 2 2 6
62863- 2 2 6 82 82 82 54 54 54 18 18 18
62864- 6 6 6 0 0 0 0 0 0 0 0 0
62865- 0 0 0 0 0 0 0 0 0 0 0 0
62866- 0 0 0 0 0 0 0 0 0 0 0 0
62867- 0 0 0 0 0 0 0 0 0 0 0 0
62868- 0 0 0 0 0 0 0 0 0 0 0 0
62869- 0 0 0 0 0 0 0 0 0 0 0 0
62870- 0 0 0 0 0 0 6 6 6 26 26 26
62871- 62 62 62 106 106 106 74 54 14 185 133 11
62872-210 162 10 121 92 8 6 6 6 62 62 62
62873-238 238 238 253 253 253 253 253 253 253 253 253
62874-253 253 253 253 253 253 253 253 253 253 253 253
62875-253 253 253 253 253 253 231 231 231 246 246 246
62876-253 253 253 253 253 253 253 253 253 253 253 253
62877-253 253 253 253 253 253 253 253 253 253 253 253
62878-253 253 253 253 253 253 253 253 253 253 253 253
62879-253 253 253 253 253 253 253 253 253 253 253 253
62880-253 253 253 253 253 253 158 158 158 18 18 18
62881- 14 14 14 2 2 6 2 2 6 2 2 6
62882- 6 6 6 18 18 18 66 66 66 38 38 38
62883- 6 6 6 94 94 94 50 50 50 18 18 18
62884- 6 6 6 0 0 0 0 0 0 0 0 0
62885- 0 0 0 0 0 0 0 0 0 0 0 0
62886- 0 0 0 0 0 0 0 0 0 0 0 0
62887- 0 0 0 0 0 0 0 0 0 0 0 0
62888- 0 0 0 0 0 0 0 0 0 0 0 0
62889- 0 0 0 0 0 0 0 0 0 6 6 6
62890- 10 10 10 10 10 10 18 18 18 38 38 38
62891- 78 78 78 142 134 106 216 158 10 242 186 14
62892-246 190 14 246 190 14 156 118 10 10 10 10
62893- 90 90 90 238 238 238 253 253 253 253 253 253
62894-253 253 253 253 253 253 253 253 253 253 253 253
62895-253 253 253 253 253 253 231 231 231 250 250 250
62896-253 253 253 253 253 253 253 253 253 253 253 253
62897-253 253 253 253 253 253 253 253 253 253 253 253
62898-253 253 253 253 253 253 253 253 253 253 253 253
62899-253 253 253 253 253 253 253 253 253 246 230 190
62900-238 204 91 238 204 91 181 142 44 37 26 9
62901- 2 2 6 2 2 6 2 2 6 2 2 6
62902- 2 2 6 2 2 6 38 38 38 46 46 46
62903- 26 26 26 106 106 106 54 54 54 18 18 18
62904- 6 6 6 0 0 0 0 0 0 0 0 0
62905- 0 0 0 0 0 0 0 0 0 0 0 0
62906- 0 0 0 0 0 0 0 0 0 0 0 0
62907- 0 0 0 0 0 0 0 0 0 0 0 0
62908- 0 0 0 0 0 0 0 0 0 0 0 0
62909- 0 0 0 6 6 6 14 14 14 22 22 22
62910- 30 30 30 38 38 38 50 50 50 70 70 70
62911-106 106 106 190 142 34 226 170 11 242 186 14
62912-246 190 14 246 190 14 246 190 14 154 114 10
62913- 6 6 6 74 74 74 226 226 226 253 253 253
62914-253 253 253 253 253 253 253 253 253 253 253 253
62915-253 253 253 253 253 253 231 231 231 250 250 250
62916-253 253 253 253 253 253 253 253 253 253 253 253
62917-253 253 253 253 253 253 253 253 253 253 253 253
62918-253 253 253 253 253 253 253 253 253 253 253 253
62919-253 253 253 253 253 253 253 253 253 228 184 62
62920-241 196 14 241 208 19 232 195 16 38 30 10
62921- 2 2 6 2 2 6 2 2 6 2 2 6
62922- 2 2 6 6 6 6 30 30 30 26 26 26
62923-203 166 17 154 142 90 66 66 66 26 26 26
62924- 6 6 6 0 0 0 0 0 0 0 0 0
62925- 0 0 0 0 0 0 0 0 0 0 0 0
62926- 0 0 0 0 0 0 0 0 0 0 0 0
62927- 0 0 0 0 0 0 0 0 0 0 0 0
62928- 0 0 0 0 0 0 0 0 0 0 0 0
62929- 6 6 6 18 18 18 38 38 38 58 58 58
62930- 78 78 78 86 86 86 101 101 101 123 123 123
62931-175 146 61 210 150 10 234 174 13 246 186 14
62932-246 190 14 246 190 14 246 190 14 238 190 10
62933-102 78 10 2 2 6 46 46 46 198 198 198
62934-253 253 253 253 253 253 253 253 253 253 253 253
62935-253 253 253 253 253 253 234 234 234 242 242 242
62936-253 253 253 253 253 253 253 253 253 253 253 253
62937-253 253 253 253 253 253 253 253 253 253 253 253
62938-253 253 253 253 253 253 253 253 253 253 253 253
62939-253 253 253 253 253 253 253 253 253 224 178 62
62940-242 186 14 241 196 14 210 166 10 22 18 6
62941- 2 2 6 2 2 6 2 2 6 2 2 6
62942- 2 2 6 2 2 6 6 6 6 121 92 8
62943-238 202 15 232 195 16 82 82 82 34 34 34
62944- 10 10 10 0 0 0 0 0 0 0 0 0
62945- 0 0 0 0 0 0 0 0 0 0 0 0
62946- 0 0 0 0 0 0 0 0 0 0 0 0
62947- 0 0 0 0 0 0 0 0 0 0 0 0
62948- 0 0 0 0 0 0 0 0 0 0 0 0
62949- 14 14 14 38 38 38 70 70 70 154 122 46
62950-190 142 34 200 144 11 197 138 11 197 138 11
62951-213 154 11 226 170 11 242 186 14 246 190 14
62952-246 190 14 246 190 14 246 190 14 246 190 14
62953-225 175 15 46 32 6 2 2 6 22 22 22
62954-158 158 158 250 250 250 253 253 253 253 253 253
62955-253 253 253 253 253 253 253 253 253 253 253 253
62956-253 253 253 253 253 253 253 253 253 253 253 253
62957-253 253 253 253 253 253 253 253 253 253 253 253
62958-253 253 253 253 253 253 253 253 253 253 253 253
62959-253 253 253 250 250 250 242 242 242 224 178 62
62960-239 182 13 236 186 11 213 154 11 46 32 6
62961- 2 2 6 2 2 6 2 2 6 2 2 6
62962- 2 2 6 2 2 6 61 42 6 225 175 15
62963-238 190 10 236 186 11 112 100 78 42 42 42
62964- 14 14 14 0 0 0 0 0 0 0 0 0
62965- 0 0 0 0 0 0 0 0 0 0 0 0
62966- 0 0 0 0 0 0 0 0 0 0 0 0
62967- 0 0 0 0 0 0 0 0 0 0 0 0
62968- 0 0 0 0 0 0 0 0 0 6 6 6
62969- 22 22 22 54 54 54 154 122 46 213 154 11
62970-226 170 11 230 174 11 226 170 11 226 170 11
62971-236 178 12 242 186 14 246 190 14 246 190 14
62972-246 190 14 246 190 14 246 190 14 246 190 14
62973-241 196 14 184 144 12 10 10 10 2 2 6
62974- 6 6 6 116 116 116 242 242 242 253 253 253
62975-253 253 253 253 253 253 253 253 253 253 253 253
62976-253 253 253 253 253 253 253 253 253 253 253 253
62977-253 253 253 253 253 253 253 253 253 253 253 253
62978-253 253 253 253 253 253 253 253 253 253 253 253
62979-253 253 253 231 231 231 198 198 198 214 170 54
62980-236 178 12 236 178 12 210 150 10 137 92 6
62981- 18 14 6 2 2 6 2 2 6 2 2 6
62982- 6 6 6 70 47 6 200 144 11 236 178 12
62983-239 182 13 239 182 13 124 112 88 58 58 58
62984- 22 22 22 6 6 6 0 0 0 0 0 0
62985- 0 0 0 0 0 0 0 0 0 0 0 0
62986- 0 0 0 0 0 0 0 0 0 0 0 0
62987- 0 0 0 0 0 0 0 0 0 0 0 0
62988- 0 0 0 0 0 0 0 0 0 10 10 10
62989- 30 30 30 70 70 70 180 133 36 226 170 11
62990-239 182 13 242 186 14 242 186 14 246 186 14
62991-246 190 14 246 190 14 246 190 14 246 190 14
62992-246 190 14 246 190 14 246 190 14 246 190 14
62993-246 190 14 232 195 16 98 70 6 2 2 6
62994- 2 2 6 2 2 6 66 66 66 221 221 221
62995-253 253 253 253 253 253 253 253 253 253 253 253
62996-253 253 253 253 253 253 253 253 253 253 253 253
62997-253 253 253 253 253 253 253 253 253 253 253 253
62998-253 253 253 253 253 253 253 253 253 253 253 253
62999-253 253 253 206 206 206 198 198 198 214 166 58
63000-230 174 11 230 174 11 216 158 10 192 133 9
63001-163 110 8 116 81 8 102 78 10 116 81 8
63002-167 114 7 197 138 11 226 170 11 239 182 13
63003-242 186 14 242 186 14 162 146 94 78 78 78
63004- 34 34 34 14 14 14 6 6 6 0 0 0
63005- 0 0 0 0 0 0 0 0 0 0 0 0
63006- 0 0 0 0 0 0 0 0 0 0 0 0
63007- 0 0 0 0 0 0 0 0 0 0 0 0
63008- 0 0 0 0 0 0 0 0 0 6 6 6
63009- 30 30 30 78 78 78 190 142 34 226 170 11
63010-239 182 13 246 190 14 246 190 14 246 190 14
63011-246 190 14 246 190 14 246 190 14 246 190 14
63012-246 190 14 246 190 14 246 190 14 246 190 14
63013-246 190 14 241 196 14 203 166 17 22 18 6
63014- 2 2 6 2 2 6 2 2 6 38 38 38
63015-218 218 218 253 253 253 253 253 253 253 253 253
63016-253 253 253 253 253 253 253 253 253 253 253 253
63017-253 253 253 253 253 253 253 253 253 253 253 253
63018-253 253 253 253 253 253 253 253 253 253 253 253
63019-250 250 250 206 206 206 198 198 198 202 162 69
63020-226 170 11 236 178 12 224 166 10 210 150 10
63021-200 144 11 197 138 11 192 133 9 197 138 11
63022-210 150 10 226 170 11 242 186 14 246 190 14
63023-246 190 14 246 186 14 225 175 15 124 112 88
63024- 62 62 62 30 30 30 14 14 14 6 6 6
63025- 0 0 0 0 0 0 0 0 0 0 0 0
63026- 0 0 0 0 0 0 0 0 0 0 0 0
63027- 0 0 0 0 0 0 0 0 0 0 0 0
63028- 0 0 0 0 0 0 0 0 0 10 10 10
63029- 30 30 30 78 78 78 174 135 50 224 166 10
63030-239 182 13 246 190 14 246 190 14 246 190 14
63031-246 190 14 246 190 14 246 190 14 246 190 14
63032-246 190 14 246 190 14 246 190 14 246 190 14
63033-246 190 14 246 190 14 241 196 14 139 102 15
63034- 2 2 6 2 2 6 2 2 6 2 2 6
63035- 78 78 78 250 250 250 253 253 253 253 253 253
63036-253 253 253 253 253 253 253 253 253 253 253 253
63037-253 253 253 253 253 253 253 253 253 253 253 253
63038-253 253 253 253 253 253 253 253 253 253 253 253
63039-250 250 250 214 214 214 198 198 198 190 150 46
63040-219 162 10 236 178 12 234 174 13 224 166 10
63041-216 158 10 213 154 11 213 154 11 216 158 10
63042-226 170 11 239 182 13 246 190 14 246 190 14
63043-246 190 14 246 190 14 242 186 14 206 162 42
63044-101 101 101 58 58 58 30 30 30 14 14 14
63045- 6 6 6 0 0 0 0 0 0 0 0 0
63046- 0 0 0 0 0 0 0 0 0 0 0 0
63047- 0 0 0 0 0 0 0 0 0 0 0 0
63048- 0 0 0 0 0 0 0 0 0 10 10 10
63049- 30 30 30 74 74 74 174 135 50 216 158 10
63050-236 178 12 246 190 14 246 190 14 246 190 14
63051-246 190 14 246 190 14 246 190 14 246 190 14
63052-246 190 14 246 190 14 246 190 14 246 190 14
63053-246 190 14 246 190 14 241 196 14 226 184 13
63054- 61 42 6 2 2 6 2 2 6 2 2 6
63055- 22 22 22 238 238 238 253 253 253 253 253 253
63056-253 253 253 253 253 253 253 253 253 253 253 253
63057-253 253 253 253 253 253 253 253 253 253 253 253
63058-253 253 253 253 253 253 253 253 253 253 253 253
63059-253 253 253 226 226 226 187 187 187 180 133 36
63060-216 158 10 236 178 12 239 182 13 236 178 12
63061-230 174 11 226 170 11 226 170 11 230 174 11
63062-236 178 12 242 186 14 246 190 14 246 190 14
63063-246 190 14 246 190 14 246 186 14 239 182 13
63064-206 162 42 106 106 106 66 66 66 34 34 34
63065- 14 14 14 6 6 6 0 0 0 0 0 0
63066- 0 0 0 0 0 0 0 0 0 0 0 0
63067- 0 0 0 0 0 0 0 0 0 0 0 0
63068- 0 0 0 0 0 0 0 0 0 6 6 6
63069- 26 26 26 70 70 70 163 133 67 213 154 11
63070-236 178 12 246 190 14 246 190 14 246 190 14
63071-246 190 14 246 190 14 246 190 14 246 190 14
63072-246 190 14 246 190 14 246 190 14 246 190 14
63073-246 190 14 246 190 14 246 190 14 241 196 14
63074-190 146 13 18 14 6 2 2 6 2 2 6
63075- 46 46 46 246 246 246 253 253 253 253 253 253
63076-253 253 253 253 253 253 253 253 253 253 253 253
63077-253 253 253 253 253 253 253 253 253 253 253 253
63078-253 253 253 253 253 253 253 253 253 253 253 253
63079-253 253 253 221 221 221 86 86 86 156 107 11
63080-216 158 10 236 178 12 242 186 14 246 186 14
63081-242 186 14 239 182 13 239 182 13 242 186 14
63082-242 186 14 246 186 14 246 190 14 246 190 14
63083-246 190 14 246 190 14 246 190 14 246 190 14
63084-242 186 14 225 175 15 142 122 72 66 66 66
63085- 30 30 30 10 10 10 0 0 0 0 0 0
63086- 0 0 0 0 0 0 0 0 0 0 0 0
63087- 0 0 0 0 0 0 0 0 0 0 0 0
63088- 0 0 0 0 0 0 0 0 0 6 6 6
63089- 26 26 26 70 70 70 163 133 67 210 150 10
63090-236 178 12 246 190 14 246 190 14 246 190 14
63091-246 190 14 246 190 14 246 190 14 246 190 14
63092-246 190 14 246 190 14 246 190 14 246 190 14
63093-246 190 14 246 190 14 246 190 14 246 190 14
63094-232 195 16 121 92 8 34 34 34 106 106 106
63095-221 221 221 253 253 253 253 253 253 253 253 253
63096-253 253 253 253 253 253 253 253 253 253 253 253
63097-253 253 253 253 253 253 253 253 253 253 253 253
63098-253 253 253 253 253 253 253 253 253 253 253 253
63099-242 242 242 82 82 82 18 14 6 163 110 8
63100-216 158 10 236 178 12 242 186 14 246 190 14
63101-246 190 14 246 190 14 246 190 14 246 190 14
63102-246 190 14 246 190 14 246 190 14 246 190 14
63103-246 190 14 246 190 14 246 190 14 246 190 14
63104-246 190 14 246 190 14 242 186 14 163 133 67
63105- 46 46 46 18 18 18 6 6 6 0 0 0
63106- 0 0 0 0 0 0 0 0 0 0 0 0
63107- 0 0 0 0 0 0 0 0 0 0 0 0
63108- 0 0 0 0 0 0 0 0 0 10 10 10
63109- 30 30 30 78 78 78 163 133 67 210 150 10
63110-236 178 12 246 186 14 246 190 14 246 190 14
63111-246 190 14 246 190 14 246 190 14 246 190 14
63112-246 190 14 246 190 14 246 190 14 246 190 14
63113-246 190 14 246 190 14 246 190 14 246 190 14
63114-241 196 14 215 174 15 190 178 144 253 253 253
63115-253 253 253 253 253 253 253 253 253 253 253 253
63116-253 253 253 253 253 253 253 253 253 253 253 253
63117-253 253 253 253 253 253 253 253 253 253 253 253
63118-253 253 253 253 253 253 253 253 253 218 218 218
63119- 58 58 58 2 2 6 22 18 6 167 114 7
63120-216 158 10 236 178 12 246 186 14 246 190 14
63121-246 190 14 246 190 14 246 190 14 246 190 14
63122-246 190 14 246 190 14 246 190 14 246 190 14
63123-246 190 14 246 190 14 246 190 14 246 190 14
63124-246 190 14 246 186 14 242 186 14 190 150 46
63125- 54 54 54 22 22 22 6 6 6 0 0 0
63126- 0 0 0 0 0 0 0 0 0 0 0 0
63127- 0 0 0 0 0 0 0 0 0 0 0 0
63128- 0 0 0 0 0 0 0 0 0 14 14 14
63129- 38 38 38 86 86 86 180 133 36 213 154 11
63130-236 178 12 246 186 14 246 190 14 246 190 14
63131-246 190 14 246 190 14 246 190 14 246 190 14
63132-246 190 14 246 190 14 246 190 14 246 190 14
63133-246 190 14 246 190 14 246 190 14 246 190 14
63134-246 190 14 232 195 16 190 146 13 214 214 214
63135-253 253 253 253 253 253 253 253 253 253 253 253
63136-253 253 253 253 253 253 253 253 253 253 253 253
63137-253 253 253 253 253 253 253 253 253 253 253 253
63138-253 253 253 250 250 250 170 170 170 26 26 26
63139- 2 2 6 2 2 6 37 26 9 163 110 8
63140-219 162 10 239 182 13 246 186 14 246 190 14
63141-246 190 14 246 190 14 246 190 14 246 190 14
63142-246 190 14 246 190 14 246 190 14 246 190 14
63143-246 190 14 246 190 14 246 190 14 246 190 14
63144-246 186 14 236 178 12 224 166 10 142 122 72
63145- 46 46 46 18 18 18 6 6 6 0 0 0
63146- 0 0 0 0 0 0 0 0 0 0 0 0
63147- 0 0 0 0 0 0 0 0 0 0 0 0
63148- 0 0 0 0 0 0 6 6 6 18 18 18
63149- 50 50 50 109 106 95 192 133 9 224 166 10
63150-242 186 14 246 190 14 246 190 14 246 190 14
63151-246 190 14 246 190 14 246 190 14 246 190 14
63152-246 190 14 246 190 14 246 190 14 246 190 14
63153-246 190 14 246 190 14 246 190 14 246 190 14
63154-242 186 14 226 184 13 210 162 10 142 110 46
63155-226 226 226 253 253 253 253 253 253 253 253 253
63156-253 253 253 253 253 253 253 253 253 253 253 253
63157-253 253 253 253 253 253 253 253 253 253 253 253
63158-198 198 198 66 66 66 2 2 6 2 2 6
63159- 2 2 6 2 2 6 50 34 6 156 107 11
63160-219 162 10 239 182 13 246 186 14 246 190 14
63161-246 190 14 246 190 14 246 190 14 246 190 14
63162-246 190 14 246 190 14 246 190 14 246 190 14
63163-246 190 14 246 190 14 246 190 14 242 186 14
63164-234 174 13 213 154 11 154 122 46 66 66 66
63165- 30 30 30 10 10 10 0 0 0 0 0 0
63166- 0 0 0 0 0 0 0 0 0 0 0 0
63167- 0 0 0 0 0 0 0 0 0 0 0 0
63168- 0 0 0 0 0 0 6 6 6 22 22 22
63169- 58 58 58 154 121 60 206 145 10 234 174 13
63170-242 186 14 246 186 14 246 190 14 246 190 14
63171-246 190 14 246 190 14 246 190 14 246 190 14
63172-246 190 14 246 190 14 246 190 14 246 190 14
63173-246 190 14 246 190 14 246 190 14 246 190 14
63174-246 186 14 236 178 12 210 162 10 163 110 8
63175- 61 42 6 138 138 138 218 218 218 250 250 250
63176-253 253 253 253 253 253 253 253 253 250 250 250
63177-242 242 242 210 210 210 144 144 144 66 66 66
63178- 6 6 6 2 2 6 2 2 6 2 2 6
63179- 2 2 6 2 2 6 61 42 6 163 110 8
63180-216 158 10 236 178 12 246 190 14 246 190 14
63181-246 190 14 246 190 14 246 190 14 246 190 14
63182-246 190 14 246 190 14 246 190 14 246 190 14
63183-246 190 14 239 182 13 230 174 11 216 158 10
63184-190 142 34 124 112 88 70 70 70 38 38 38
63185- 18 18 18 6 6 6 0 0 0 0 0 0
63186- 0 0 0 0 0 0 0 0 0 0 0 0
63187- 0 0 0 0 0 0 0 0 0 0 0 0
63188- 0 0 0 0 0 0 6 6 6 22 22 22
63189- 62 62 62 168 124 44 206 145 10 224 166 10
63190-236 178 12 239 182 13 242 186 14 242 186 14
63191-246 186 14 246 190 14 246 190 14 246 190 14
63192-246 190 14 246 190 14 246 190 14 246 190 14
63193-246 190 14 246 190 14 246 190 14 246 190 14
63194-246 190 14 236 178 12 216 158 10 175 118 6
63195- 80 54 7 2 2 6 6 6 6 30 30 30
63196- 54 54 54 62 62 62 50 50 50 38 38 38
63197- 14 14 14 2 2 6 2 2 6 2 2 6
63198- 2 2 6 2 2 6 2 2 6 2 2 6
63199- 2 2 6 6 6 6 80 54 7 167 114 7
63200-213 154 11 236 178 12 246 190 14 246 190 14
63201-246 190 14 246 190 14 246 190 14 246 190 14
63202-246 190 14 242 186 14 239 182 13 239 182 13
63203-230 174 11 210 150 10 174 135 50 124 112 88
63204- 82 82 82 54 54 54 34 34 34 18 18 18
63205- 6 6 6 0 0 0 0 0 0 0 0 0
63206- 0 0 0 0 0 0 0 0 0 0 0 0
63207- 0 0 0 0 0 0 0 0 0 0 0 0
63208- 0 0 0 0 0 0 6 6 6 18 18 18
63209- 50 50 50 158 118 36 192 133 9 200 144 11
63210-216 158 10 219 162 10 224 166 10 226 170 11
63211-230 174 11 236 178 12 239 182 13 239 182 13
63212-242 186 14 246 186 14 246 190 14 246 190 14
63213-246 190 14 246 190 14 246 190 14 246 190 14
63214-246 186 14 230 174 11 210 150 10 163 110 8
63215-104 69 6 10 10 10 2 2 6 2 2 6
63216- 2 2 6 2 2 6 2 2 6 2 2 6
63217- 2 2 6 2 2 6 2 2 6 2 2 6
63218- 2 2 6 2 2 6 2 2 6 2 2 6
63219- 2 2 6 6 6 6 91 60 6 167 114 7
63220-206 145 10 230 174 11 242 186 14 246 190 14
63221-246 190 14 246 190 14 246 186 14 242 186 14
63222-239 182 13 230 174 11 224 166 10 213 154 11
63223-180 133 36 124 112 88 86 86 86 58 58 58
63224- 38 38 38 22 22 22 10 10 10 6 6 6
63225- 0 0 0 0 0 0 0 0 0 0 0 0
63226- 0 0 0 0 0 0 0 0 0 0 0 0
63227- 0 0 0 0 0 0 0 0 0 0 0 0
63228- 0 0 0 0 0 0 0 0 0 14 14 14
63229- 34 34 34 70 70 70 138 110 50 158 118 36
63230-167 114 7 180 123 7 192 133 9 197 138 11
63231-200 144 11 206 145 10 213 154 11 219 162 10
63232-224 166 10 230 174 11 239 182 13 242 186 14
63233-246 186 14 246 186 14 246 186 14 246 186 14
63234-239 182 13 216 158 10 185 133 11 152 99 6
63235-104 69 6 18 14 6 2 2 6 2 2 6
63236- 2 2 6 2 2 6 2 2 6 2 2 6
63237- 2 2 6 2 2 6 2 2 6 2 2 6
63238- 2 2 6 2 2 6 2 2 6 2 2 6
63239- 2 2 6 6 6 6 80 54 7 152 99 6
63240-192 133 9 219 162 10 236 178 12 239 182 13
63241-246 186 14 242 186 14 239 182 13 236 178 12
63242-224 166 10 206 145 10 192 133 9 154 121 60
63243- 94 94 94 62 62 62 42 42 42 22 22 22
63244- 14 14 14 6 6 6 0 0 0 0 0 0
63245- 0 0 0 0 0 0 0 0 0 0 0 0
63246- 0 0 0 0 0 0 0 0 0 0 0 0
63247- 0 0 0 0 0 0 0 0 0 0 0 0
63248- 0 0 0 0 0 0 0 0 0 6 6 6
63249- 18 18 18 34 34 34 58 58 58 78 78 78
63250-101 98 89 124 112 88 142 110 46 156 107 11
63251-163 110 8 167 114 7 175 118 6 180 123 7
63252-185 133 11 197 138 11 210 150 10 219 162 10
63253-226 170 11 236 178 12 236 178 12 234 174 13
63254-219 162 10 197 138 11 163 110 8 130 83 6
63255- 91 60 6 10 10 10 2 2 6 2 2 6
63256- 18 18 18 38 38 38 38 38 38 38 38 38
63257- 38 38 38 38 38 38 38 38 38 38 38 38
63258- 38 38 38 38 38 38 26 26 26 2 2 6
63259- 2 2 6 6 6 6 70 47 6 137 92 6
63260-175 118 6 200 144 11 219 162 10 230 174 11
63261-234 174 13 230 174 11 219 162 10 210 150 10
63262-192 133 9 163 110 8 124 112 88 82 82 82
63263- 50 50 50 30 30 30 14 14 14 6 6 6
63264- 0 0 0 0 0 0 0 0 0 0 0 0
63265- 0 0 0 0 0 0 0 0 0 0 0 0
63266- 0 0 0 0 0 0 0 0 0 0 0 0
63267- 0 0 0 0 0 0 0 0 0 0 0 0
63268- 0 0 0 0 0 0 0 0 0 0 0 0
63269- 6 6 6 14 14 14 22 22 22 34 34 34
63270- 42 42 42 58 58 58 74 74 74 86 86 86
63271-101 98 89 122 102 70 130 98 46 121 87 25
63272-137 92 6 152 99 6 163 110 8 180 123 7
63273-185 133 11 197 138 11 206 145 10 200 144 11
63274-180 123 7 156 107 11 130 83 6 104 69 6
63275- 50 34 6 54 54 54 110 110 110 101 98 89
63276- 86 86 86 82 82 82 78 78 78 78 78 78
63277- 78 78 78 78 78 78 78 78 78 78 78 78
63278- 78 78 78 82 82 82 86 86 86 94 94 94
63279-106 106 106 101 101 101 86 66 34 124 80 6
63280-156 107 11 180 123 7 192 133 9 200 144 11
63281-206 145 10 200 144 11 192 133 9 175 118 6
63282-139 102 15 109 106 95 70 70 70 42 42 42
63283- 22 22 22 10 10 10 0 0 0 0 0 0
63284- 0 0 0 0 0 0 0 0 0 0 0 0
63285- 0 0 0 0 0 0 0 0 0 0 0 0
63286- 0 0 0 0 0 0 0 0 0 0 0 0
63287- 0 0 0 0 0 0 0 0 0 0 0 0
63288- 0 0 0 0 0 0 0 0 0 0 0 0
63289- 0 0 0 0 0 0 6 6 6 10 10 10
63290- 14 14 14 22 22 22 30 30 30 38 38 38
63291- 50 50 50 62 62 62 74 74 74 90 90 90
63292-101 98 89 112 100 78 121 87 25 124 80 6
63293-137 92 6 152 99 6 152 99 6 152 99 6
63294-138 86 6 124 80 6 98 70 6 86 66 30
63295-101 98 89 82 82 82 58 58 58 46 46 46
63296- 38 38 38 34 34 34 34 34 34 34 34 34
63297- 34 34 34 34 34 34 34 34 34 34 34 34
63298- 34 34 34 34 34 34 38 38 38 42 42 42
63299- 54 54 54 82 82 82 94 86 76 91 60 6
63300-134 86 6 156 107 11 167 114 7 175 118 6
63301-175 118 6 167 114 7 152 99 6 121 87 25
63302-101 98 89 62 62 62 34 34 34 18 18 18
63303- 6 6 6 0 0 0 0 0 0 0 0 0
63304- 0 0 0 0 0 0 0 0 0 0 0 0
63305- 0 0 0 0 0 0 0 0 0 0 0 0
63306- 0 0 0 0 0 0 0 0 0 0 0 0
63307- 0 0 0 0 0 0 0 0 0 0 0 0
63308- 0 0 0 0 0 0 0 0 0 0 0 0
63309- 0 0 0 0 0 0 0 0 0 0 0 0
63310- 0 0 0 6 6 6 6 6 6 10 10 10
63311- 18 18 18 22 22 22 30 30 30 42 42 42
63312- 50 50 50 66 66 66 86 86 86 101 98 89
63313-106 86 58 98 70 6 104 69 6 104 69 6
63314-104 69 6 91 60 6 82 62 34 90 90 90
63315- 62 62 62 38 38 38 22 22 22 14 14 14
63316- 10 10 10 10 10 10 10 10 10 10 10 10
63317- 10 10 10 10 10 10 6 6 6 10 10 10
63318- 10 10 10 10 10 10 10 10 10 14 14 14
63319- 22 22 22 42 42 42 70 70 70 89 81 66
63320- 80 54 7 104 69 6 124 80 6 137 92 6
63321-134 86 6 116 81 8 100 82 52 86 86 86
63322- 58 58 58 30 30 30 14 14 14 6 6 6
63323- 0 0 0 0 0 0 0 0 0 0 0 0
63324- 0 0 0 0 0 0 0 0 0 0 0 0
63325- 0 0 0 0 0 0 0 0 0 0 0 0
63326- 0 0 0 0 0 0 0 0 0 0 0 0
63327- 0 0 0 0 0 0 0 0 0 0 0 0
63328- 0 0 0 0 0 0 0 0 0 0 0 0
63329- 0 0 0 0 0 0 0 0 0 0 0 0
63330- 0 0 0 0 0 0 0 0 0 0 0 0
63331- 0 0 0 6 6 6 10 10 10 14 14 14
63332- 18 18 18 26 26 26 38 38 38 54 54 54
63333- 70 70 70 86 86 86 94 86 76 89 81 66
63334- 89 81 66 86 86 86 74 74 74 50 50 50
63335- 30 30 30 14 14 14 6 6 6 0 0 0
63336- 0 0 0 0 0 0 0 0 0 0 0 0
63337- 0 0 0 0 0 0 0 0 0 0 0 0
63338- 0 0 0 0 0 0 0 0 0 0 0 0
63339- 6 6 6 18 18 18 34 34 34 58 58 58
63340- 82 82 82 89 81 66 89 81 66 89 81 66
63341- 94 86 66 94 86 76 74 74 74 50 50 50
63342- 26 26 26 14 14 14 6 6 6 0 0 0
63343- 0 0 0 0 0 0 0 0 0 0 0 0
63344- 0 0 0 0 0 0 0 0 0 0 0 0
63345- 0 0 0 0 0 0 0 0 0 0 0 0
63346- 0 0 0 0 0 0 0 0 0 0 0 0
63347- 0 0 0 0 0 0 0 0 0 0 0 0
63348- 0 0 0 0 0 0 0 0 0 0 0 0
63349- 0 0 0 0 0 0 0 0 0 0 0 0
63350- 0 0 0 0 0 0 0 0 0 0 0 0
63351- 0 0 0 0 0 0 0 0 0 0 0 0
63352- 6 6 6 6 6 6 14 14 14 18 18 18
63353- 30 30 30 38 38 38 46 46 46 54 54 54
63354- 50 50 50 42 42 42 30 30 30 18 18 18
63355- 10 10 10 0 0 0 0 0 0 0 0 0
63356- 0 0 0 0 0 0 0 0 0 0 0 0
63357- 0 0 0 0 0 0 0 0 0 0 0 0
63358- 0 0 0 0 0 0 0 0 0 0 0 0
63359- 0 0 0 6 6 6 14 14 14 26 26 26
63360- 38 38 38 50 50 50 58 58 58 58 58 58
63361- 54 54 54 42 42 42 30 30 30 18 18 18
63362- 10 10 10 0 0 0 0 0 0 0 0 0
63363- 0 0 0 0 0 0 0 0 0 0 0 0
63364- 0 0 0 0 0 0 0 0 0 0 0 0
63365- 0 0 0 0 0 0 0 0 0 0 0 0
63366- 0 0 0 0 0 0 0 0 0 0 0 0
63367- 0 0 0 0 0 0 0 0 0 0 0 0
63368- 0 0 0 0 0 0 0 0 0 0 0 0
63369- 0 0 0 0 0 0 0 0 0 0 0 0
63370- 0 0 0 0 0 0 0 0 0 0 0 0
63371- 0 0 0 0 0 0 0 0 0 0 0 0
63372- 0 0 0 0 0 0 0 0 0 6 6 6
63373- 6 6 6 10 10 10 14 14 14 18 18 18
63374- 18 18 18 14 14 14 10 10 10 6 6 6
63375- 0 0 0 0 0 0 0 0 0 0 0 0
63376- 0 0 0 0 0 0 0 0 0 0 0 0
63377- 0 0 0 0 0 0 0 0 0 0 0 0
63378- 0 0 0 0 0 0 0 0 0 0 0 0
63379- 0 0 0 0 0 0 0 0 0 6 6 6
63380- 14 14 14 18 18 18 22 22 22 22 22 22
63381- 18 18 18 14 14 14 10 10 10 6 6 6
63382- 0 0 0 0 0 0 0 0 0 0 0 0
63383- 0 0 0 0 0 0 0 0 0 0 0 0
63384- 0 0 0 0 0 0 0 0 0 0 0 0
63385- 0 0 0 0 0 0 0 0 0 0 0 0
63386- 0 0 0 0 0 0 0 0 0 0 0 0
63387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63400+4 4 4 4 4 4
63401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63414+4 4 4 4 4 4
63415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63428+4 4 4 4 4 4
63429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63442+4 4 4 4 4 4
63443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63456+4 4 4 4 4 4
63457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63470+4 4 4 4 4 4
63471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63475+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
63476+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
63477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63480+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
63481+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63482+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
63483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63484+4 4 4 4 4 4
63485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63489+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
63490+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
63491+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63494+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
63495+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
63496+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
63497+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63498+4 4 4 4 4 4
63499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63503+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
63504+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
63505+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63508+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
63509+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
63510+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
63511+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
63512+4 4 4 4 4 4
63513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63516+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
63517+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
63518+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
63519+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
63520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63521+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63522+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
63523+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
63524+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
63525+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
63526+4 4 4 4 4 4
63527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63530+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
63531+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
63532+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
63533+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
63534+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63535+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
63536+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
63537+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
63538+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
63539+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
63540+4 4 4 4 4 4
63541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63544+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
63545+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
63546+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
63547+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
63548+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
63549+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
63550+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
63551+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
63552+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
63553+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
63554+4 4 4 4 4 4
63555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63557+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
63558+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
63559+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
63560+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
63561+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
63562+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
63563+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
63564+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
63565+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
63566+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
63567+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
63568+4 4 4 4 4 4
63569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63571+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
63572+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
63573+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
63574+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
63575+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
63576+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
63577+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
63578+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
63579+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
63580+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
63581+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
63582+4 4 4 4 4 4
63583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63585+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
63586+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
63587+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
63588+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
63589+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
63590+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
63591+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
63592+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
63593+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
63594+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
63595+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63596+4 4 4 4 4 4
63597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63599+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
63600+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
63601+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
63602+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
63603+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
63604+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
63605+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
63606+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
63607+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
63608+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
63609+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
63610+4 4 4 4 4 4
63611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63612+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
63613+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
63614+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
63615+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
63616+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
63617+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
63618+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
63619+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
63620+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
63621+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
63622+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
63623+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
63624+4 4 4 4 4 4
63625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63626+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
63627+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
63628+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
63629+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
63630+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
63631+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
63632+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
63633+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
63634+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
63635+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
63636+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
63637+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
63638+0 0 0 4 4 4
63639+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63640+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
63641+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
63642+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
63643+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
63644+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
63645+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
63646+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
63647+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
63648+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
63649+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
63650+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
63651+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
63652+2 0 0 0 0 0
63653+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
63654+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
63655+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
63656+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
63657+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
63658+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
63659+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
63660+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
63661+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
63662+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
63663+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
63664+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
63665+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
63666+37 38 37 0 0 0
63667+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63668+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
63669+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
63670+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
63671+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
63672+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
63673+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
63674+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
63675+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
63676+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
63677+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
63678+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
63679+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
63680+85 115 134 4 0 0
63681+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
63682+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
63683+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
63684+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
63685+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
63686+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
63687+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
63688+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
63689+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
63690+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
63691+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
63692+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
63693+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
63694+60 73 81 4 0 0
63695+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
63696+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
63697+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
63698+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
63699+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
63700+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
63701+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
63702+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
63703+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
63704+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
63705+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
63706+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
63707+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
63708+16 19 21 4 0 0
63709+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
63710+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
63711+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
63712+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
63713+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
63714+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
63715+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
63716+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
63717+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
63718+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
63719+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
63720+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
63721+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
63722+4 0 0 4 3 3
63723+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
63724+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
63725+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
63726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
63727+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
63728+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
63729+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
63730+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
63731+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
63732+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
63733+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
63734+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
63735+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
63736+3 2 2 4 4 4
63737+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
63738+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
63739+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
63740+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63741+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
63742+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
63743+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
63744+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
63745+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
63746+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
63747+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
63748+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
63749+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
63750+4 4 4 4 4 4
63751+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
63752+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
63753+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
63754+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
63755+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
63756+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
63757+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
63758+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
63759+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
63760+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
63761+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
63762+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
63763+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
63764+4 4 4 4 4 4
63765+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
63766+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
63767+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
63768+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
63769+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
63770+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63771+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
63772+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
63773+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
63774+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
63775+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
63776+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
63777+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
63778+5 5 5 5 5 5
63779+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
63780+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
63781+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
63782+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
63783+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
63784+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63785+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
63786+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
63787+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
63788+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
63789+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
63790+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
63791+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
63792+5 5 5 4 4 4
63793+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
63794+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
63795+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
63796+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
63797+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63798+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
63799+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
63800+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
63801+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
63802+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
63803+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
63804+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
63805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63806+4 4 4 4 4 4
63807+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
63808+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
63809+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
63810+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
63811+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
63812+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63813+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63814+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
63815+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
63816+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
63817+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
63818+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
63819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63820+4 4 4 4 4 4
63821+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
63822+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
63823+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
63824+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
63825+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63826+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
63827+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
63828+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
63829+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
63830+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
63831+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
63832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63834+4 4 4 4 4 4
63835+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
63836+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
63837+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
63838+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
63839+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63840+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63841+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63842+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
63843+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
63844+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
63845+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
63846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63848+4 4 4 4 4 4
63849+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
63850+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
63851+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
63852+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
63853+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63854+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
63855+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63856+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
63857+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
63858+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
63859+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63862+4 4 4 4 4 4
63863+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
63864+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
63865+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
63866+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
63867+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63868+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
63869+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
63870+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
63871+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
63872+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
63873+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
63874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63876+4 4 4 4 4 4
63877+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
63878+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
63879+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
63880+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
63881+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63882+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
63883+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
63884+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
63885+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
63886+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
63887+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
63888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63890+4 4 4 4 4 4
63891+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
63892+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
63893+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
63894+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63895+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
63896+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
63897+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
63898+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
63899+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
63900+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
63901+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63904+4 4 4 4 4 4
63905+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
63906+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
63907+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
63908+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63909+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63910+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
63911+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
63912+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
63913+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
63914+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
63915+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63918+4 4 4 4 4 4
63919+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
63920+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
63921+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63922+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63923+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63924+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
63925+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
63926+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
63927+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
63928+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
63929+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63932+4 4 4 4 4 4
63933+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
63934+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
63935+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63936+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63937+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63938+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
63939+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
63940+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
63941+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63942+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63943+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63946+4 4 4 4 4 4
63947+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63948+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
63949+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63950+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
63951+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
63952+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
63953+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
63954+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
63955+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63956+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63957+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63960+4 4 4 4 4 4
63961+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63962+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
63963+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63964+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
63965+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63966+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
63967+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
63968+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
63969+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63970+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63971+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63974+4 4 4 4 4 4
63975+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
63976+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
63977+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63978+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
63979+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
63980+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
63981+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
63982+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
63983+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63984+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63985+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63988+4 4 4 4 4 4
63989+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
63990+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
63991+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63992+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
63993+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
63994+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
63995+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
63996+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
63997+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63998+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63999+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64002+4 4 4 4 4 4
64003+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
64004+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
64005+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
64006+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
64007+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
64008+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
64009+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
64010+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
64011+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64012+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64013+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64016+4 4 4 4 4 4
64017+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
64018+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
64019+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64020+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
64021+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
64022+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
64023+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
64024+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
64025+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
64026+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64027+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64030+4 4 4 4 4 4
64031+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
64032+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
64033+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
64034+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
64035+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
64036+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
64037+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
64038+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
64039+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64040+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64041+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64044+4 4 4 4 4 4
64045+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64046+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
64047+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64048+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
64049+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
64050+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
64051+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
64052+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
64053+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64054+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64055+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64058+4 4 4 4 4 4
64059+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64060+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
64061+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
64062+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
64063+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
64064+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
64065+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64066+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
64067+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64068+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64069+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64072+4 4 4 4 4 4
64073+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64074+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
64075+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
64076+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64077+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
64078+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
64079+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64080+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
64081+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64082+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64083+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64086+4 4 4 4 4 4
64087+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64088+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
64089+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
64090+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
64091+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
64092+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
64093+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
64094+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
64095+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
64096+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64097+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64100+4 4 4 4 4 4
64101+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64102+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
64103+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
64104+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
64105+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
64106+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
64107+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
64108+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
64109+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
64110+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64111+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64114+4 4 4 4 4 4
64115+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
64116+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
64117+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
64118+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
64119+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64120+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
64121+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
64122+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
64123+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
64124+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64125+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64128+4 4 4 4 4 4
64129+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64130+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
64131+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
64132+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
64133+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
64134+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
64135+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
64136+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
64137+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
64138+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64139+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64142+4 4 4 4 4 4
64143+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
64144+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
64145+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
64146+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
64147+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
64148+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
64149+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
64150+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
64151+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
64152+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
64153+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64156+4 4 4 4 4 4
64157+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
64158+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64159+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
64160+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
64161+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
64162+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
64163+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
64164+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
64165+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
64166+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
64167+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64170+4 4 4 4 4 4
64171+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
64172+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64173+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
64174+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
64175+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
64176+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
64177+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64178+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
64179+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
64180+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
64181+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64184+4 4 4 4 4 4
64185+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
64186+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
64187+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
64188+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
64189+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
64190+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
64191+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
64192+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
64193+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
64194+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
64195+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64198+4 4 4 4 4 4
64199+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
64200+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
64201+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64202+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
64203+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
64204+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
64205+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
64206+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
64207+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
64208+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
64209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64212+4 4 4 4 4 4
64213+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64214+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
64215+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
64216+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
64217+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
64218+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
64219+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
64220+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
64221+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
64222+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64226+4 4 4 4 4 4
64227+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
64228+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
64229+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
64230+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
64231+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
64232+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
64233+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
64234+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
64235+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
64236+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
64237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64240+4 4 4 4 4 4
64241+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
64242+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
64243+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
64244+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
64245+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
64246+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
64247+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
64248+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
64249+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
64250+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64254+4 4 4 4 4 4
64255+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
64256+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64257+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
64258+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64259+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
64260+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
64261+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
64262+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
64263+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
64264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64268+4 4 4 4 4 4
64269+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
64270+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
64271+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
64272+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
64273+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
64274+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
64275+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
64276+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
64277+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
64278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64282+4 4 4 4 4 4
64283+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64284+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
64285+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
64286+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
64287+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
64288+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
64289+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
64290+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
64291+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64296+4 4 4 4 4 4
64297+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
64298+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
64299+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64300+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
64301+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
64302+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
64303+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
64304+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
64305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64310+4 4 4 4 4 4
64311+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64312+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
64313+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
64314+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
64315+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
64316+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
64317+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
64318+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64324+4 4 4 4 4 4
64325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64326+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
64327+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64328+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
64329+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
64330+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
64331+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
64332+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
64333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64338+4 4 4 4 4 4
64339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64340+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
64341+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
64342+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
64343+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
64344+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
64345+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
64346+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
64347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64352+4 4 4 4 4 4
64353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64354+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
64355+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
64356+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64357+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
64358+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
64359+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
64360+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64366+4 4 4 4 4 4
64367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64369+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64370+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
64371+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
64372+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
64373+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
64374+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64380+4 4 4 4 4 4
64381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64384+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64385+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
64386+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
64387+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
64388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64394+4 4 4 4 4 4
64395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64398+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64399+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64400+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
64401+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
64402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64408+4 4 4 4 4 4
64409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64412+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64413+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64414+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64415+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
64416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64422+4 4 4 4 4 4
64423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64426+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
64427+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
64428+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
64429+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
64430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64436+4 4 4 4 4 4
64437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64441+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
64442+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64443+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64450+4 4 4 4 4 4
64451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64455+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
64456+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
64457+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
64458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64464+4 4 4 4 4 4
64465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64469+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
64470+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
64471+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64478+4 4 4 4 4 4
64479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64483+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
64484+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
64485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64492+4 4 4 4 4 4
64493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64497+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64498+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
64499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64506+4 4 4 4 4 4
64507diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
64508index 443e3c8..c443d6a 100644
64509--- a/drivers/video/nvidia/nv_backlight.c
64510+++ b/drivers/video/nvidia/nv_backlight.c
64511@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
64512 return bd->props.brightness;
64513 }
64514
64515-static struct backlight_ops nvidia_bl_ops = {
64516+static const struct backlight_ops nvidia_bl_ops = {
64517 .get_brightness = nvidia_bl_get_brightness,
64518 .update_status = nvidia_bl_update_status,
64519 };
64520diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
64521index d94c57f..912984c 100644
64522--- a/drivers/video/riva/fbdev.c
64523+++ b/drivers/video/riva/fbdev.c
64524@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
64525 return bd->props.brightness;
64526 }
64527
64528-static struct backlight_ops riva_bl_ops = {
64529+static const struct backlight_ops riva_bl_ops = {
64530 .get_brightness = riva_bl_get_brightness,
64531 .update_status = riva_bl_update_status,
64532 };
64533diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
64534index 54fbb29..2c108fc 100644
64535--- a/drivers/video/uvesafb.c
64536+++ b/drivers/video/uvesafb.c
64537@@ -18,6 +18,7 @@
64538 #include <linux/fb.h>
64539 #include <linux/io.h>
64540 #include <linux/mutex.h>
64541+#include <linux/moduleloader.h>
64542 #include <video/edid.h>
64543 #include <video/uvesafb.h>
64544 #ifdef CONFIG_X86
64545@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
64546 NULL,
64547 };
64548
64549- return call_usermodehelper(v86d_path, argv, envp, 1);
64550+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
64551 }
64552
64553 /*
64554@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
64555 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
64556 par->pmi_setpal = par->ypan = 0;
64557 } else {
64558+
64559+#ifdef CONFIG_PAX_KERNEXEC
64560+#ifdef CONFIG_MODULES
64561+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
64562+#endif
64563+ if (!par->pmi_code) {
64564+ par->pmi_setpal = par->ypan = 0;
64565+ return 0;
64566+ }
64567+#endif
64568+
64569 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
64570 + task->t.regs.edi);
64571+
64572+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64573+ pax_open_kernel();
64574+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
64575+ pax_close_kernel();
64576+
64577+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
64578+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
64579+#else
64580 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
64581 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
64582+#endif
64583+
64584 printk(KERN_INFO "uvesafb: protected mode interface info at "
64585 "%04x:%04x\n",
64586 (u16)task->t.regs.es, (u16)task->t.regs.edi);
64587@@ -1799,6 +1822,11 @@ out:
64588 if (par->vbe_modes)
64589 kfree(par->vbe_modes);
64590
64591+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64592+ if (par->pmi_code)
64593+ module_free_exec(NULL, par->pmi_code);
64594+#endif
64595+
64596 framebuffer_release(info);
64597 return err;
64598 }
64599@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
64600 kfree(par->vbe_state_orig);
64601 if (par->vbe_state_saved)
64602 kfree(par->vbe_state_saved);
64603+
64604+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64605+ if (par->pmi_code)
64606+ module_free_exec(NULL, par->pmi_code);
64607+#endif
64608+
64609 }
64610
64611 framebuffer_release(info);
64612diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
64613index bd37ee1..cb827e8 100644
64614--- a/drivers/video/vesafb.c
64615+++ b/drivers/video/vesafb.c
64616@@ -9,6 +9,7 @@
64617 */
64618
64619 #include <linux/module.h>
64620+#include <linux/moduleloader.h>
64621 #include <linux/kernel.h>
64622 #include <linux/errno.h>
64623 #include <linux/string.h>
64624@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
64625 static int vram_total __initdata; /* Set total amount of memory */
64626 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
64627 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
64628-static void (*pmi_start)(void) __read_mostly;
64629-static void (*pmi_pal) (void) __read_mostly;
64630+static void (*pmi_start)(void) __read_only;
64631+static void (*pmi_pal) (void) __read_only;
64632 static int depth __read_mostly;
64633 static int vga_compat __read_mostly;
64634 /* --------------------------------------------------------------------- */
64635@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
64636 unsigned int size_vmode;
64637 unsigned int size_remap;
64638 unsigned int size_total;
64639+ void *pmi_code = NULL;
64640
64641 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
64642 return -ENODEV;
64643@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
64644 size_remap = size_total;
64645 vesafb_fix.smem_len = size_remap;
64646
64647-#ifndef __i386__
64648- screen_info.vesapm_seg = 0;
64649-#endif
64650-
64651 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
64652 printk(KERN_WARNING
64653 "vesafb: cannot reserve video memory at 0x%lx\n",
64654@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
64655 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
64656 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
64657
64658+#ifdef __i386__
64659+
64660+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64661+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
64662+ if (!pmi_code)
64663+#elif !defined(CONFIG_PAX_KERNEXEC)
64664+ if (0)
64665+#endif
64666+
64667+#endif
64668+ screen_info.vesapm_seg = 0;
64669+
64670 if (screen_info.vesapm_seg) {
64671- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
64672- screen_info.vesapm_seg,screen_info.vesapm_off);
64673+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
64674+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
64675 }
64676
64677 if (screen_info.vesapm_seg < 0xc000)
64678@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
64679
64680 if (ypan || pmi_setpal) {
64681 unsigned short *pmi_base;
64682+
64683 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
64684- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
64685- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
64686+
64687+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64688+ pax_open_kernel();
64689+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
64690+#else
64691+ pmi_code = pmi_base;
64692+#endif
64693+
64694+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
64695+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
64696+
64697+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64698+ pmi_start = ktva_ktla(pmi_start);
64699+ pmi_pal = ktva_ktla(pmi_pal);
64700+ pax_close_kernel();
64701+#endif
64702+
64703 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
64704 if (pmi_base[3]) {
64705 printk(KERN_INFO "vesafb: pmi: ports = ");
64706@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
64707 info->node, info->fix.id);
64708 return 0;
64709 err:
64710+
64711+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64712+ module_free_exec(NULL, pmi_code);
64713+#endif
64714+
64715 if (info->screen_base)
64716 iounmap(info->screen_base);
64717 framebuffer_release(info);
64718diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
64719index 88a60e0..6783cc2 100644
64720--- a/drivers/xen/sys-hypervisor.c
64721+++ b/drivers/xen/sys-hypervisor.c
64722@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
64723 return 0;
64724 }
64725
64726-static struct sysfs_ops hyp_sysfs_ops = {
64727+static const struct sysfs_ops hyp_sysfs_ops = {
64728 .show = hyp_sysfs_show,
64729 .store = hyp_sysfs_store,
64730 };
64731diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
64732index 18f74ec..3227009 100644
64733--- a/fs/9p/vfs_inode.c
64734+++ b/fs/9p/vfs_inode.c
64735@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64736 static void
64737 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64738 {
64739- char *s = nd_get_link(nd);
64740+ const char *s = nd_get_link(nd);
64741
64742 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
64743 IS_ERR(s) ? "<error>" : s);
64744diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
64745index bb4cc5b..df5eaa0 100644
64746--- a/fs/Kconfig.binfmt
64747+++ b/fs/Kconfig.binfmt
64748@@ -86,7 +86,7 @@ config HAVE_AOUT
64749
64750 config BINFMT_AOUT
64751 tristate "Kernel support for a.out and ECOFF binaries"
64752- depends on HAVE_AOUT
64753+ depends on HAVE_AOUT && BROKEN
64754 ---help---
64755 A.out (Assembler.OUTput) is a set of formats for libraries and
64756 executables used in the earliest versions of UNIX. Linux used
64757diff --git a/fs/aio.c b/fs/aio.c
64758index 22a19ad..d484e5b 100644
64759--- a/fs/aio.c
64760+++ b/fs/aio.c
64761@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
64762 size += sizeof(struct io_event) * nr_events;
64763 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
64764
64765- if (nr_pages < 0)
64766+ if (nr_pages <= 0)
64767 return -EINVAL;
64768
64769 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
64770@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
64771 struct aio_timeout to;
64772 int retry = 0;
64773
64774+ pax_track_stack();
64775+
64776 /* needed to zero any padding within an entry (there shouldn't be
64777 * any, but C is fun!
64778 */
64779@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
64780 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
64781 {
64782 ssize_t ret;
64783+ struct iovec iovstack;
64784
64785 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
64786 kiocb->ki_nbytes, 1,
64787- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
64788+ &iovstack, &kiocb->ki_iovec);
64789 if (ret < 0)
64790 goto out;
64791
64792+ if (kiocb->ki_iovec == &iovstack) {
64793+ kiocb->ki_inline_vec = iovstack;
64794+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
64795+ }
64796 kiocb->ki_nr_segs = kiocb->ki_nbytes;
64797 kiocb->ki_cur_seg = 0;
64798 /* ki_nbytes/left now reflect bytes instead of segs */
64799diff --git a/fs/attr.c b/fs/attr.c
64800index 96d394b..33cf5b4 100644
64801--- a/fs/attr.c
64802+++ b/fs/attr.c
64803@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
64804 unsigned long limit;
64805
64806 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64807+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
64808 if (limit != RLIM_INFINITY && offset > limit)
64809 goto out_sig;
64810 if (offset > inode->i_sb->s_maxbytes)
64811diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
64812index b4ea829..e63ef18 100644
64813--- a/fs/autofs4/symlink.c
64814+++ b/fs/autofs4/symlink.c
64815@@ -15,7 +15,7 @@
64816 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
64817 {
64818 struct autofs_info *ino = autofs4_dentry_ino(dentry);
64819- nd_set_link(nd, (char *)ino->u.symlink);
64820+ nd_set_link(nd, ino->u.symlink);
64821 return NULL;
64822 }
64823
64824diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
64825index 136a0d6..a287331 100644
64826--- a/fs/autofs4/waitq.c
64827+++ b/fs/autofs4/waitq.c
64828@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
64829 {
64830 unsigned long sigpipe, flags;
64831 mm_segment_t fs;
64832- const char *data = (const char *)addr;
64833+ const char __user *data = (const char __force_user *)addr;
64834 ssize_t wr = 0;
64835
64836 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
64837diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
64838index 9158c07..3f06659 100644
64839--- a/fs/befs/linuxvfs.c
64840+++ b/fs/befs/linuxvfs.c
64841@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64842 {
64843 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
64844 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
64845- char *link = nd_get_link(nd);
64846+ const char *link = nd_get_link(nd);
64847 if (!IS_ERR(link))
64848 kfree(link);
64849 }
64850diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
64851index 0133b5a..3710d09 100644
64852--- a/fs/binfmt_aout.c
64853+++ b/fs/binfmt_aout.c
64854@@ -16,6 +16,7 @@
64855 #include <linux/string.h>
64856 #include <linux/fs.h>
64857 #include <linux/file.h>
64858+#include <linux/security.h>
64859 #include <linux/stat.h>
64860 #include <linux/fcntl.h>
64861 #include <linux/ptrace.h>
64862@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64863 #endif
64864 # define START_STACK(u) (u.start_stack)
64865
64866+ memset(&dump, 0, sizeof(dump));
64867+
64868 fs = get_fs();
64869 set_fs(KERNEL_DS);
64870 has_dumped = 1;
64871@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64872
64873 /* If the size of the dump file exceeds the rlimit, then see what would happen
64874 if we wrote the stack, but not the data area. */
64875+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
64876 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
64877 dump.u_dsize = 0;
64878
64879 /* Make sure we have enough room to write the stack and data areas. */
64880+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
64881 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
64882 dump.u_ssize = 0;
64883
64884@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64885 dump_size = dump.u_ssize << PAGE_SHIFT;
64886 DUMP_WRITE(dump_start,dump_size);
64887 }
64888-/* Finally dump the task struct. Not be used by gdb, but could be useful */
64889- set_fs(KERNEL_DS);
64890- DUMP_WRITE(current,sizeof(*current));
64891+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
64892 end_coredump:
64893 set_fs(fs);
64894 return has_dumped;
64895@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64896 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64897 if (rlim >= RLIM_INFINITY)
64898 rlim = ~0;
64899+
64900+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
64901 if (ex.a_data + ex.a_bss > rlim)
64902 return -ENOMEM;
64903
64904@@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64905 current->mm->free_area_cache = current->mm->mmap_base;
64906 current->mm->cached_hole_size = 0;
64907
64908+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64909+ if (retval < 0) {
64910+ /* Someone check-me: is this error path enough? */
64911+ send_sig(SIGKILL, current, 0);
64912+ return retval;
64913+ }
64914+
64915 install_exec_creds(bprm);
64916 current->flags &= ~PF_FORKNOEXEC;
64917
64918+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64919+ current->mm->pax_flags = 0UL;
64920+#endif
64921+
64922+#ifdef CONFIG_PAX_PAGEEXEC
64923+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
64924+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
64925+
64926+#ifdef CONFIG_PAX_EMUTRAMP
64927+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
64928+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
64929+#endif
64930+
64931+#ifdef CONFIG_PAX_MPROTECT
64932+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
64933+ current->mm->pax_flags |= MF_PAX_MPROTECT;
64934+#endif
64935+
64936+ }
64937+#endif
64938+
64939 if (N_MAGIC(ex) == OMAGIC) {
64940 unsigned long text_addr, map_size;
64941 loff_t pos;
64942@@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64943
64944 down_write(&current->mm->mmap_sem);
64945 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
64946- PROT_READ | PROT_WRITE | PROT_EXEC,
64947+ PROT_READ | PROT_WRITE,
64948 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
64949 fd_offset + ex.a_text);
64950 up_write(&current->mm->mmap_sem);
64951@@ -367,13 +400,6 @@ beyond_if:
64952 return retval;
64953 }
64954
64955- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64956- if (retval < 0) {
64957- /* Someone check-me: is this error path enough? */
64958- send_sig(SIGKILL, current, 0);
64959- return retval;
64960- }
64961-
64962 current->mm->start_stack =
64963 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
64964 #ifdef __alpha__
64965diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
64966index 1ed37ba..66794b9 100644
64967--- a/fs/binfmt_elf.c
64968+++ b/fs/binfmt_elf.c
64969@@ -31,6 +31,7 @@
64970 #include <linux/random.h>
64971 #include <linux/elf.h>
64972 #include <linux/utsname.h>
64973+#include <linux/xattr.h>
64974 #include <asm/uaccess.h>
64975 #include <asm/param.h>
64976 #include <asm/page.h>
64977@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
64978 #define elf_core_dump NULL
64979 #endif
64980
64981+#ifdef CONFIG_PAX_MPROTECT
64982+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
64983+#endif
64984+
64985 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
64986 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
64987 #else
64988@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
64989 .load_binary = load_elf_binary,
64990 .load_shlib = load_elf_library,
64991 .core_dump = elf_core_dump,
64992+
64993+#ifdef CONFIG_PAX_MPROTECT
64994+ .handle_mprotect= elf_handle_mprotect,
64995+#endif
64996+
64997 .min_coredump = ELF_EXEC_PAGESIZE,
64998 .hasvdso = 1
64999 };
65000@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
65001
65002 static int set_brk(unsigned long start, unsigned long end)
65003 {
65004+ unsigned long e = end;
65005+
65006 start = ELF_PAGEALIGN(start);
65007 end = ELF_PAGEALIGN(end);
65008 if (end > start) {
65009@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
65010 if (BAD_ADDR(addr))
65011 return addr;
65012 }
65013- current->mm->start_brk = current->mm->brk = end;
65014+ current->mm->start_brk = current->mm->brk = e;
65015 return 0;
65016 }
65017
65018@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65019 elf_addr_t __user *u_rand_bytes;
65020 const char *k_platform = ELF_PLATFORM;
65021 const char *k_base_platform = ELF_BASE_PLATFORM;
65022- unsigned char k_rand_bytes[16];
65023+ u32 k_rand_bytes[4];
65024 int items;
65025 elf_addr_t *elf_info;
65026 int ei_index = 0;
65027 const struct cred *cred = current_cred();
65028 struct vm_area_struct *vma;
65029+ unsigned long saved_auxv[AT_VECTOR_SIZE];
65030+
65031+ pax_track_stack();
65032
65033 /*
65034 * In some cases (e.g. Hyper-Threading), we want to avoid L1
65035@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65036 * Generate 16 random bytes for userspace PRNG seeding.
65037 */
65038 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
65039- u_rand_bytes = (elf_addr_t __user *)
65040- STACK_ALLOC(p, sizeof(k_rand_bytes));
65041+ srandom32(k_rand_bytes[0] ^ random32());
65042+ srandom32(k_rand_bytes[1] ^ random32());
65043+ srandom32(k_rand_bytes[2] ^ random32());
65044+ srandom32(k_rand_bytes[3] ^ random32());
65045+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
65046+ u_rand_bytes = (elf_addr_t __user *) p;
65047 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
65048 return -EFAULT;
65049
65050@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65051 return -EFAULT;
65052 current->mm->env_end = p;
65053
65054+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
65055+
65056 /* Put the elf_info on the stack in the right place. */
65057 sp = (elf_addr_t __user *)envp + 1;
65058- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
65059+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
65060 return -EFAULT;
65061 return 0;
65062 }
65063@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65064 {
65065 struct elf_phdr *elf_phdata;
65066 struct elf_phdr *eppnt;
65067- unsigned long load_addr = 0;
65068+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
65069 int load_addr_set = 0;
65070 unsigned long last_bss = 0, elf_bss = 0;
65071- unsigned long error = ~0UL;
65072+ unsigned long error = -EINVAL;
65073 unsigned long total_size;
65074 int retval, i, size;
65075
65076@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65077 goto out_close;
65078 }
65079
65080+#ifdef CONFIG_PAX_SEGMEXEC
65081+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
65082+ pax_task_size = SEGMEXEC_TASK_SIZE;
65083+#endif
65084+
65085 eppnt = elf_phdata;
65086 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
65087 if (eppnt->p_type == PT_LOAD) {
65088@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65089 k = load_addr + eppnt->p_vaddr;
65090 if (BAD_ADDR(k) ||
65091 eppnt->p_filesz > eppnt->p_memsz ||
65092- eppnt->p_memsz > TASK_SIZE ||
65093- TASK_SIZE - eppnt->p_memsz < k) {
65094+ eppnt->p_memsz > pax_task_size ||
65095+ pax_task_size - eppnt->p_memsz < k) {
65096 error = -ENOMEM;
65097 goto out_close;
65098 }
65099@@ -532,6 +558,351 @@ out:
65100 return error;
65101 }
65102
65103+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65104+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
65105+{
65106+ unsigned long pax_flags = 0UL;
65107+
65108+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65109+
65110+#ifdef CONFIG_PAX_PAGEEXEC
65111+ if (elf_phdata->p_flags & PF_PAGEEXEC)
65112+ pax_flags |= MF_PAX_PAGEEXEC;
65113+#endif
65114+
65115+#ifdef CONFIG_PAX_SEGMEXEC
65116+ if (elf_phdata->p_flags & PF_SEGMEXEC)
65117+ pax_flags |= MF_PAX_SEGMEXEC;
65118+#endif
65119+
65120+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65121+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65122+ if (nx_enabled)
65123+ pax_flags &= ~MF_PAX_SEGMEXEC;
65124+ else
65125+ pax_flags &= ~MF_PAX_PAGEEXEC;
65126+ }
65127+#endif
65128+
65129+#ifdef CONFIG_PAX_EMUTRAMP
65130+ if (elf_phdata->p_flags & PF_EMUTRAMP)
65131+ pax_flags |= MF_PAX_EMUTRAMP;
65132+#endif
65133+
65134+#ifdef CONFIG_PAX_MPROTECT
65135+ if (elf_phdata->p_flags & PF_MPROTECT)
65136+ pax_flags |= MF_PAX_MPROTECT;
65137+#endif
65138+
65139+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65140+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
65141+ pax_flags |= MF_PAX_RANDMMAP;
65142+#endif
65143+
65144+#endif
65145+
65146+ return pax_flags;
65147+}
65148+
65149+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
65150+{
65151+ unsigned long pax_flags = 0UL;
65152+
65153+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65154+
65155+#ifdef CONFIG_PAX_PAGEEXEC
65156+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
65157+ pax_flags |= MF_PAX_PAGEEXEC;
65158+#endif
65159+
65160+#ifdef CONFIG_PAX_SEGMEXEC
65161+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
65162+ pax_flags |= MF_PAX_SEGMEXEC;
65163+#endif
65164+
65165+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65166+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65167+ if (nx_enabled)
65168+ pax_flags &= ~MF_PAX_SEGMEXEC;
65169+ else
65170+ pax_flags &= ~MF_PAX_PAGEEXEC;
65171+ }
65172+#endif
65173+
65174+#ifdef CONFIG_PAX_EMUTRAMP
65175+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
65176+ pax_flags |= MF_PAX_EMUTRAMP;
65177+#endif
65178+
65179+#ifdef CONFIG_PAX_MPROTECT
65180+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
65181+ pax_flags |= MF_PAX_MPROTECT;
65182+#endif
65183+
65184+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65185+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
65186+ pax_flags |= MF_PAX_RANDMMAP;
65187+#endif
65188+
65189+#endif
65190+
65191+ return pax_flags;
65192+}
65193+
65194+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
65195+{
65196+ unsigned long pax_flags = 0UL;
65197+
65198+#ifdef CONFIG_PAX_EI_PAX
65199+
65200+#ifdef CONFIG_PAX_PAGEEXEC
65201+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
65202+ pax_flags |= MF_PAX_PAGEEXEC;
65203+#endif
65204+
65205+#ifdef CONFIG_PAX_SEGMEXEC
65206+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
65207+ pax_flags |= MF_PAX_SEGMEXEC;
65208+#endif
65209+
65210+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65211+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65212+ if (nx_enabled)
65213+ pax_flags &= ~MF_PAX_SEGMEXEC;
65214+ else
65215+ pax_flags &= ~MF_PAX_PAGEEXEC;
65216+ }
65217+#endif
65218+
65219+#ifdef CONFIG_PAX_EMUTRAMP
65220+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
65221+ pax_flags |= MF_PAX_EMUTRAMP;
65222+#endif
65223+
65224+#ifdef CONFIG_PAX_MPROTECT
65225+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
65226+ pax_flags |= MF_PAX_MPROTECT;
65227+#endif
65228+
65229+#ifdef CONFIG_PAX_ASLR
65230+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
65231+ pax_flags |= MF_PAX_RANDMMAP;
65232+#endif
65233+
65234+#else
65235+
65236+#ifdef CONFIG_PAX_PAGEEXEC
65237+ pax_flags |= MF_PAX_PAGEEXEC;
65238+#endif
65239+
65240+#ifdef CONFIG_PAX_MPROTECT
65241+ pax_flags |= MF_PAX_MPROTECT;
65242+#endif
65243+
65244+#ifdef CONFIG_PAX_RANDMMAP
65245+ pax_flags |= MF_PAX_RANDMMAP;
65246+#endif
65247+
65248+#ifdef CONFIG_PAX_SEGMEXEC
65249+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
65250+ pax_flags &= ~MF_PAX_PAGEEXEC;
65251+ pax_flags |= MF_PAX_SEGMEXEC;
65252+ }
65253+#endif
65254+
65255+#endif
65256+
65257+ return pax_flags;
65258+}
65259+
65260+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
65261+{
65262+
65263+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65264+ unsigned long i;
65265+
65266+ for (i = 0UL; i < elf_ex->e_phnum; i++)
65267+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
65268+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
65269+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
65270+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
65271+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
65272+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
65273+ return ~0UL;
65274+
65275+#ifdef CONFIG_PAX_SOFTMODE
65276+ if (pax_softmode)
65277+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
65278+ else
65279+#endif
65280+
65281+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
65282+ break;
65283+ }
65284+#endif
65285+
65286+ return ~0UL;
65287+}
65288+
65289+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65290+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
65291+{
65292+ unsigned long pax_flags = 0UL;
65293+
65294+#ifdef CONFIG_PAX_PAGEEXEC
65295+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
65296+ pax_flags |= MF_PAX_PAGEEXEC;
65297+#endif
65298+
65299+#ifdef CONFIG_PAX_SEGMEXEC
65300+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
65301+ pax_flags |= MF_PAX_SEGMEXEC;
65302+#endif
65303+
65304+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65305+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65306+ if ((__supported_pte_mask & _PAGE_NX))
65307+ pax_flags &= ~MF_PAX_SEGMEXEC;
65308+ else
65309+ pax_flags &= ~MF_PAX_PAGEEXEC;
65310+ }
65311+#endif
65312+
65313+#ifdef CONFIG_PAX_EMUTRAMP
65314+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
65315+ pax_flags |= MF_PAX_EMUTRAMP;
65316+#endif
65317+
65318+#ifdef CONFIG_PAX_MPROTECT
65319+ if (pax_flags_softmode & MF_PAX_MPROTECT)
65320+ pax_flags |= MF_PAX_MPROTECT;
65321+#endif
65322+
65323+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65324+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
65325+ pax_flags |= MF_PAX_RANDMMAP;
65326+#endif
65327+
65328+ return pax_flags;
65329+}
65330+
65331+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
65332+{
65333+ unsigned long pax_flags = 0UL;
65334+
65335+#ifdef CONFIG_PAX_PAGEEXEC
65336+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
65337+ pax_flags |= MF_PAX_PAGEEXEC;
65338+#endif
65339+
65340+#ifdef CONFIG_PAX_SEGMEXEC
65341+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
65342+ pax_flags |= MF_PAX_SEGMEXEC;
65343+#endif
65344+
65345+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65346+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65347+ if ((__supported_pte_mask & _PAGE_NX))
65348+ pax_flags &= ~MF_PAX_SEGMEXEC;
65349+ else
65350+ pax_flags &= ~MF_PAX_PAGEEXEC;
65351+ }
65352+#endif
65353+
65354+#ifdef CONFIG_PAX_EMUTRAMP
65355+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
65356+ pax_flags |= MF_PAX_EMUTRAMP;
65357+#endif
65358+
65359+#ifdef CONFIG_PAX_MPROTECT
65360+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
65361+ pax_flags |= MF_PAX_MPROTECT;
65362+#endif
65363+
65364+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65365+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
65366+ pax_flags |= MF_PAX_RANDMMAP;
65367+#endif
65368+
65369+ return pax_flags;
65370+}
65371+#endif
65372+
65373+static unsigned long pax_parse_xattr_pax(struct file * const file)
65374+{
65375+
65376+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65377+ ssize_t xattr_size, i;
65378+ unsigned char xattr_value[5];
65379+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
65380+
65381+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
65382+ if (xattr_size <= 0)
65383+ return ~0UL;
65384+
65385+ for (i = 0; i < xattr_size; i++)
65386+ switch (xattr_value[i]) {
65387+ default:
65388+ return ~0UL;
65389+
65390+#define parse_flag(option1, option2, flag) \
65391+ case option1: \
65392+ pax_flags_hardmode |= MF_PAX_##flag; \
65393+ break; \
65394+ case option2: \
65395+ pax_flags_softmode |= MF_PAX_##flag; \
65396+ break;
65397+
65398+ parse_flag('p', 'P', PAGEEXEC);
65399+ parse_flag('e', 'E', EMUTRAMP);
65400+ parse_flag('m', 'M', MPROTECT);
65401+ parse_flag('r', 'R', RANDMMAP);
65402+ parse_flag('s', 'S', SEGMEXEC);
65403+
65404+#undef parse_flag
65405+ }
65406+
65407+ if (pax_flags_hardmode & pax_flags_softmode)
65408+ return ~0UL;
65409+
65410+#ifdef CONFIG_PAX_SOFTMODE
65411+ if (pax_softmode)
65412+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
65413+ else
65414+#endif
65415+
65416+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
65417+#else
65418+ return ~0UL;
65419+#endif
65420+
65421+}
65422+
65423+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
65424+{
65425+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
65426+
65427+ pax_flags = pax_parse_ei_pax(elf_ex);
65428+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
65429+ xattr_pax_flags = pax_parse_xattr_pax(file);
65430+
65431+ if (pt_pax_flags == ~0UL)
65432+ pt_pax_flags = xattr_pax_flags;
65433+ else if (xattr_pax_flags == ~0UL)
65434+ xattr_pax_flags = pt_pax_flags;
65435+ if (pt_pax_flags != xattr_pax_flags)
65436+ return -EINVAL;
65437+ if (pt_pax_flags != ~0UL)
65438+ pax_flags = pt_pax_flags;
65439+
65440+ if (0 > pax_check_flags(&pax_flags))
65441+ return -EINVAL;
65442+
65443+ current->mm->pax_flags = pax_flags;
65444+ return 0;
65445+}
65446+#endif
65447+
65448 /*
65449 * These are the functions used to load ELF style executables and shared
65450 * libraries. There is no binary dependent code anywhere else.
65451@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
65452 {
65453 unsigned int random_variable = 0;
65454
65455+#ifdef CONFIG_PAX_RANDUSTACK
65456+ if (randomize_va_space)
65457+ return stack_top - current->mm->delta_stack;
65458+#endif
65459+
65460 if ((current->flags & PF_RANDOMIZE) &&
65461 !(current->personality & ADDR_NO_RANDOMIZE)) {
65462 random_variable = get_random_int() & STACK_RND_MASK;
65463@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65464 unsigned long load_addr = 0, load_bias = 0;
65465 int load_addr_set = 0;
65466 char * elf_interpreter = NULL;
65467- unsigned long error;
65468+ unsigned long error = 0;
65469 struct elf_phdr *elf_ppnt, *elf_phdata;
65470 unsigned long elf_bss, elf_brk;
65471 int retval, i;
65472@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65473 unsigned long start_code, end_code, start_data, end_data;
65474 unsigned long reloc_func_desc = 0;
65475 int executable_stack = EXSTACK_DEFAULT;
65476- unsigned long def_flags = 0;
65477 struct {
65478 struct elfhdr elf_ex;
65479 struct elfhdr interp_elf_ex;
65480 } *loc;
65481+ unsigned long pax_task_size = TASK_SIZE;
65482
65483 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
65484 if (!loc) {
65485@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65486
65487 /* OK, This is the point of no return */
65488 current->flags &= ~PF_FORKNOEXEC;
65489- current->mm->def_flags = def_flags;
65490+
65491+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65492+ current->mm->pax_flags = 0UL;
65493+#endif
65494+
65495+#ifdef CONFIG_PAX_DLRESOLVE
65496+ current->mm->call_dl_resolve = 0UL;
65497+#endif
65498+
65499+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65500+ current->mm->call_syscall = 0UL;
65501+#endif
65502+
65503+#ifdef CONFIG_PAX_ASLR
65504+ current->mm->delta_mmap = 0UL;
65505+ current->mm->delta_stack = 0UL;
65506+#endif
65507+
65508+ current->mm->def_flags = 0;
65509+
65510+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65511+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
65512+ send_sig(SIGKILL, current, 0);
65513+ goto out_free_dentry;
65514+ }
65515+#endif
65516+
65517+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65518+ pax_set_initial_flags(bprm);
65519+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65520+ if (pax_set_initial_flags_func)
65521+ (pax_set_initial_flags_func)(bprm);
65522+#endif
65523+
65524+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65525+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
65526+ current->mm->context.user_cs_limit = PAGE_SIZE;
65527+ current->mm->def_flags |= VM_PAGEEXEC;
65528+ }
65529+#endif
65530+
65531+#ifdef CONFIG_PAX_SEGMEXEC
65532+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65533+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
65534+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
65535+ pax_task_size = SEGMEXEC_TASK_SIZE;
65536+ }
65537+#endif
65538+
65539+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
65540+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65541+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
65542+ put_cpu();
65543+ }
65544+#endif
65545
65546 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
65547 may depend on the personality. */
65548 SET_PERSONALITY(loc->elf_ex);
65549+
65550+#ifdef CONFIG_PAX_ASLR
65551+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
65552+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
65553+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
65554+ }
65555+#endif
65556+
65557+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65558+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65559+ executable_stack = EXSTACK_DISABLE_X;
65560+ current->personality &= ~READ_IMPLIES_EXEC;
65561+ } else
65562+#endif
65563+
65564 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
65565 current->personality |= READ_IMPLIES_EXEC;
65566
65567@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65568 * might try to exec. This is because the brk will
65569 * follow the loader, and is not movable. */
65570 #ifdef CONFIG_X86
65571- load_bias = 0;
65572+ if (current->flags & PF_RANDOMIZE)
65573+ load_bias = 0;
65574+ else
65575+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65576 #else
65577 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65578 #endif
65579+
65580+#ifdef CONFIG_PAX_RANDMMAP
65581+ /* PaX: randomize base address at the default exe base if requested */
65582+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
65583+#ifdef CONFIG_SPARC64
65584+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
65585+#else
65586+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
65587+#endif
65588+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
65589+ elf_flags |= MAP_FIXED;
65590+ }
65591+#endif
65592+
65593 }
65594
65595 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
65596@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65597 * allowed task size. Note that p_filesz must always be
65598 * <= p_memsz so it is only necessary to check p_memsz.
65599 */
65600- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65601- elf_ppnt->p_memsz > TASK_SIZE ||
65602- TASK_SIZE - elf_ppnt->p_memsz < k) {
65603+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65604+ elf_ppnt->p_memsz > pax_task_size ||
65605+ pax_task_size - elf_ppnt->p_memsz < k) {
65606 /* set_brk can never work. Avoid overflows. */
65607 send_sig(SIGKILL, current, 0);
65608 retval = -EINVAL;
65609@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65610 start_data += load_bias;
65611 end_data += load_bias;
65612
65613+#ifdef CONFIG_PAX_RANDMMAP
65614+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
65615+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
65616+#endif
65617+
65618 /* Calling set_brk effectively mmaps the pages that we need
65619 * for the bss and break sections. We must do this before
65620 * mapping in the interpreter, to make sure it doesn't wind
65621@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65622 goto out_free_dentry;
65623 }
65624 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
65625- send_sig(SIGSEGV, current, 0);
65626- retval = -EFAULT; /* Nobody gets to see this, but.. */
65627- goto out_free_dentry;
65628+ /*
65629+ * This bss-zeroing can fail if the ELF
65630+ * file specifies odd protections. So
65631+ * we don't check the return value
65632+ */
65633 }
65634
65635 if (elf_interpreter) {
65636@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
65637 unsigned long n = off;
65638 if (n > PAGE_SIZE)
65639 n = PAGE_SIZE;
65640- if (!dump_write(file, buf, n))
65641+ if (!dump_write(file, buf, n)) {
65642+ free_page((unsigned long)buf);
65643 return 0;
65644+ }
65645 off -= n;
65646 }
65647 free_page((unsigned long)buf);
65648@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
65649 * Decide what to dump of a segment, part, all or none.
65650 */
65651 static unsigned long vma_dump_size(struct vm_area_struct *vma,
65652- unsigned long mm_flags)
65653+ unsigned long mm_flags, long signr)
65654 {
65655 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
65656
65657@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
65658 if (vma->vm_file == NULL)
65659 return 0;
65660
65661- if (FILTER(MAPPED_PRIVATE))
65662+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
65663 goto whole;
65664
65665 /*
65666@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
65667 #undef DUMP_WRITE
65668
65669 #define DUMP_WRITE(addr, nr) \
65670+ do { \
65671+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
65672 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
65673- goto end_coredump;
65674+ goto end_coredump; \
65675+ } while (0);
65676
65677 static void fill_elf_header(struct elfhdr *elf, int segs,
65678 u16 machine, u32 flags, u8 osabi)
65679@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
65680 {
65681 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
65682 int i = 0;
65683- do
65684+ do {
65685 i += 2;
65686- while (auxv[i - 2] != AT_NULL);
65687+ } while (auxv[i - 2] != AT_NULL);
65688 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
65689 }
65690
65691@@ -1452,7 +1926,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
65692 for (i = 1; i < view->n; ++i) {
65693 const struct user_regset *regset = &view->regsets[i];
65694 do_thread_regset_writeback(t->task, regset);
65695- if (regset->core_note_type &&
65696+ if (regset->core_note_type && regset->get &&
65697 (!regset->active || regset->active(t->task, regset))) {
65698 int ret;
65699 size_t size = regset->n * regset->size;
65700@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65701 phdr.p_offset = offset;
65702 phdr.p_vaddr = vma->vm_start;
65703 phdr.p_paddr = 0;
65704- phdr.p_filesz = vma_dump_size(vma, mm_flags);
65705+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
65706 phdr.p_memsz = vma->vm_end - vma->vm_start;
65707 offset += phdr.p_filesz;
65708 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
65709@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65710 unsigned long addr;
65711 unsigned long end;
65712
65713- end = vma->vm_start + vma_dump_size(vma, mm_flags);
65714+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
65715
65716 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
65717 struct page *page;
65718@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65719 page = get_dump_page(addr);
65720 if (page) {
65721 void *kaddr = kmap(page);
65722+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
65723 stop = ((size += PAGE_SIZE) > limit) ||
65724 !dump_write(file, kaddr, PAGE_SIZE);
65725 kunmap(page);
65726@@ -2042,6 +2517,97 @@ out:
65727
65728 #endif /* USE_ELF_CORE_DUMP */
65729
65730+#ifdef CONFIG_PAX_MPROTECT
65731+/* PaX: non-PIC ELF libraries need relocations on their executable segments
65732+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
65733+ * we'll remove VM_MAYWRITE for good on RELRO segments.
65734+ *
65735+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
65736+ * basis because we want to allow the common case and not the special ones.
65737+ */
65738+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
65739+{
65740+ struct elfhdr elf_h;
65741+ struct elf_phdr elf_p;
65742+ unsigned long i;
65743+ unsigned long oldflags;
65744+ bool is_textrel_rw, is_textrel_rx, is_relro;
65745+
65746+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
65747+ return;
65748+
65749+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
65750+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
65751+
65752+#ifdef CONFIG_PAX_ELFRELOCS
65753+ /* possible TEXTREL */
65754+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
65755+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
65756+#else
65757+ is_textrel_rw = false;
65758+ is_textrel_rx = false;
65759+#endif
65760+
65761+ /* possible RELRO */
65762+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
65763+
65764+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
65765+ return;
65766+
65767+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
65768+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
65769+
65770+#ifdef CONFIG_PAX_ETEXECRELOCS
65771+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65772+#else
65773+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
65774+#endif
65775+
65776+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65777+ !elf_check_arch(&elf_h) ||
65778+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
65779+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
65780+ return;
65781+
65782+ for (i = 0UL; i < elf_h.e_phnum; i++) {
65783+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
65784+ return;
65785+ switch (elf_p.p_type) {
65786+ case PT_DYNAMIC:
65787+ if (!is_textrel_rw && !is_textrel_rx)
65788+ continue;
65789+ i = 0UL;
65790+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
65791+ elf_dyn dyn;
65792+
65793+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
65794+ return;
65795+ if (dyn.d_tag == DT_NULL)
65796+ return;
65797+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
65798+ gr_log_textrel(vma);
65799+ if (is_textrel_rw)
65800+ vma->vm_flags |= VM_MAYWRITE;
65801+ else
65802+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
65803+ vma->vm_flags &= ~VM_MAYWRITE;
65804+ return;
65805+ }
65806+ i++;
65807+ }
65808+ return;
65809+
65810+ case PT_GNU_RELRO:
65811+ if (!is_relro)
65812+ continue;
65813+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
65814+ vma->vm_flags &= ~VM_MAYWRITE;
65815+ return;
65816+ }
65817+ }
65818+}
65819+#endif
65820+
65821 static int __init init_elf_binfmt(void)
65822 {
65823 return register_binfmt(&elf_format);
65824diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
65825index ca88c46..f155a60 100644
65826--- a/fs/binfmt_flat.c
65827+++ b/fs/binfmt_flat.c
65828@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
65829 realdatastart = (unsigned long) -ENOMEM;
65830 printk("Unable to allocate RAM for process data, errno %d\n",
65831 (int)-realdatastart);
65832+ down_write(&current->mm->mmap_sem);
65833 do_munmap(current->mm, textpos, text_len);
65834+ up_write(&current->mm->mmap_sem);
65835 ret = realdatastart;
65836 goto err;
65837 }
65838@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65839 }
65840 if (IS_ERR_VALUE(result)) {
65841 printk("Unable to read data+bss, errno %d\n", (int)-result);
65842+ down_write(&current->mm->mmap_sem);
65843 do_munmap(current->mm, textpos, text_len);
65844 do_munmap(current->mm, realdatastart, data_len + extra);
65845+ up_write(&current->mm->mmap_sem);
65846 ret = result;
65847 goto err;
65848 }
65849@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65850 }
65851 if (IS_ERR_VALUE(result)) {
65852 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
65853+ down_write(&current->mm->mmap_sem);
65854 do_munmap(current->mm, textpos, text_len + data_len + extra +
65855 MAX_SHARED_LIBS * sizeof(unsigned long));
65856+ up_write(&current->mm->mmap_sem);
65857 ret = result;
65858 goto err;
65859 }
65860diff --git a/fs/bio.c b/fs/bio.c
65861index e696713..83de133 100644
65862--- a/fs/bio.c
65863+++ b/fs/bio.c
65864@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
65865
65866 i = 0;
65867 while (i < bio_slab_nr) {
65868- struct bio_slab *bslab = &bio_slabs[i];
65869+ bslab = &bio_slabs[i];
65870
65871 if (!bslab->slab && entry == -1)
65872 entry = i;
65873@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
65874 const int read = bio_data_dir(bio) == READ;
65875 struct bio_map_data *bmd = bio->bi_private;
65876 int i;
65877- char *p = bmd->sgvecs[0].iov_base;
65878+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
65879
65880 __bio_for_each_segment(bvec, bio, i, 0) {
65881 char *addr = page_address(bvec->bv_page);
65882diff --git a/fs/block_dev.c b/fs/block_dev.c
65883index e65efa2..04fae57 100644
65884--- a/fs/block_dev.c
65885+++ b/fs/block_dev.c
65886@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
65887 else if (bdev->bd_contains == bdev)
65888 res = 0; /* is a whole device which isn't held */
65889
65890- else if (bdev->bd_contains->bd_holder == bd_claim)
65891+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
65892 res = 0; /* is a partition of a device that is being partitioned */
65893 else if (bdev->bd_contains->bd_holder != NULL)
65894 res = -EBUSY; /* is a partition of a held device */
65895diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
65896index c4bc570..42acd8d 100644
65897--- a/fs/btrfs/ctree.c
65898+++ b/fs/btrfs/ctree.c
65899@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
65900 free_extent_buffer(buf);
65901 add_root_to_dirty_list(root);
65902 } else {
65903- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
65904- parent_start = parent->start;
65905- else
65906+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
65907+ if (parent)
65908+ parent_start = parent->start;
65909+ else
65910+ parent_start = 0;
65911+ } else
65912 parent_start = 0;
65913
65914 WARN_ON(trans->transid != btrfs_header_generation(parent));
65915@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
65916
65917 ret = 0;
65918 if (slot == 0) {
65919- struct btrfs_disk_key disk_key;
65920 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
65921 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
65922 }
65923diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
65924index f447188..59c17c5 100644
65925--- a/fs/btrfs/disk-io.c
65926+++ b/fs/btrfs/disk-io.c
65927@@ -39,7 +39,7 @@
65928 #include "tree-log.h"
65929 #include "free-space-cache.h"
65930
65931-static struct extent_io_ops btree_extent_io_ops;
65932+static const struct extent_io_ops btree_extent_io_ops;
65933 static void end_workqueue_fn(struct btrfs_work *work);
65934 static void free_fs_root(struct btrfs_root *root);
65935
65936@@ -2607,7 +2607,7 @@ out:
65937 return 0;
65938 }
65939
65940-static struct extent_io_ops btree_extent_io_ops = {
65941+static const struct extent_io_ops btree_extent_io_ops = {
65942 .write_cache_pages_lock_hook = btree_lock_page_hook,
65943 .readpage_end_io_hook = btree_readpage_end_io_hook,
65944 .submit_bio_hook = btree_submit_bio_hook,
65945diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
65946index 559f724..a026171 100644
65947--- a/fs/btrfs/extent-tree.c
65948+++ b/fs/btrfs/extent-tree.c
65949@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
65950 u64 group_start = group->key.objectid;
65951 new_extents = kmalloc(sizeof(*new_extents),
65952 GFP_NOFS);
65953+ if (!new_extents) {
65954+ ret = -ENOMEM;
65955+ goto out;
65956+ }
65957 nr_extents = 1;
65958 ret = get_new_locations(reloc_inode,
65959 extent_key,
65960diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
65961index 36de250..7ec75c7 100644
65962--- a/fs/btrfs/extent_io.h
65963+++ b/fs/btrfs/extent_io.h
65964@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
65965 struct bio *bio, int mirror_num,
65966 unsigned long bio_flags);
65967 struct extent_io_ops {
65968- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
65969+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
65970 u64 start, u64 end, int *page_started,
65971 unsigned long *nr_written);
65972- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
65973- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
65974+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
65975+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
65976 extent_submit_bio_hook_t *submit_bio_hook;
65977- int (*merge_bio_hook)(struct page *page, unsigned long offset,
65978+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
65979 size_t size, struct bio *bio,
65980 unsigned long bio_flags);
65981- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
65982- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
65983+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
65984+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
65985 u64 start, u64 end,
65986 struct extent_state *state);
65987- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
65988+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
65989 u64 start, u64 end,
65990 struct extent_state *state);
65991- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65992+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65993 struct extent_state *state);
65994- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65995+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65996 struct extent_state *state, int uptodate);
65997- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
65998+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
65999 unsigned long old, unsigned long bits);
66000- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
66001+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
66002 unsigned long bits);
66003- int (*merge_extent_hook)(struct inode *inode,
66004+ int (* const merge_extent_hook)(struct inode *inode,
66005 struct extent_state *new,
66006 struct extent_state *other);
66007- int (*split_extent_hook)(struct inode *inode,
66008+ int (* const split_extent_hook)(struct inode *inode,
66009 struct extent_state *orig, u64 split);
66010- int (*write_cache_pages_lock_hook)(struct page *page);
66011+ int (* const write_cache_pages_lock_hook)(struct page *page);
66012 };
66013
66014 struct extent_io_tree {
66015@@ -88,7 +88,7 @@ struct extent_io_tree {
66016 u64 dirty_bytes;
66017 spinlock_t lock;
66018 spinlock_t buffer_lock;
66019- struct extent_io_ops *ops;
66020+ const struct extent_io_ops *ops;
66021 };
66022
66023 struct extent_state {
66024diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
66025index cb2849f..3718fb4 100644
66026--- a/fs/btrfs/free-space-cache.c
66027+++ b/fs/btrfs/free-space-cache.c
66028@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
66029
66030 while(1) {
66031 if (entry->bytes < bytes || entry->offset < min_start) {
66032- struct rb_node *node;
66033-
66034 node = rb_next(&entry->offset_index);
66035 if (!node)
66036 break;
66037@@ -1226,7 +1224,7 @@ again:
66038 */
66039 while (entry->bitmap || found_bitmap ||
66040 (!entry->bitmap && entry->bytes < min_bytes)) {
66041- struct rb_node *node = rb_next(&entry->offset_index);
66042+ node = rb_next(&entry->offset_index);
66043
66044 if (entry->bitmap && entry->bytes > bytes + empty_size) {
66045 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
66046diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
66047index e03a836..323837e 100644
66048--- a/fs/btrfs/inode.c
66049+++ b/fs/btrfs/inode.c
66050@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
66051 static const struct address_space_operations btrfs_aops;
66052 static const struct address_space_operations btrfs_symlink_aops;
66053 static const struct file_operations btrfs_dir_file_operations;
66054-static struct extent_io_ops btrfs_extent_io_ops;
66055+static const struct extent_io_ops btrfs_extent_io_ops;
66056
66057 static struct kmem_cache *btrfs_inode_cachep;
66058 struct kmem_cache *btrfs_trans_handle_cachep;
66059@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
66060 1, 0, NULL, GFP_NOFS);
66061 while (start < end) {
66062 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
66063+ BUG_ON(!async_cow);
66064 async_cow->inode = inode;
66065 async_cow->root = root;
66066 async_cow->locked_page = locked_page;
66067@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
66068 inline_size = btrfs_file_extent_inline_item_len(leaf,
66069 btrfs_item_nr(leaf, path->slots[0]));
66070 tmp = kmalloc(inline_size, GFP_NOFS);
66071+ if (!tmp)
66072+ return -ENOMEM;
66073 ptr = btrfs_file_extent_inline_start(item);
66074
66075 read_extent_buffer(leaf, tmp, ptr, inline_size);
66076@@ -5410,7 +5413,7 @@ fail:
66077 return -ENOMEM;
66078 }
66079
66080-static int btrfs_getattr(struct vfsmount *mnt,
66081+int btrfs_getattr(struct vfsmount *mnt,
66082 struct dentry *dentry, struct kstat *stat)
66083 {
66084 struct inode *inode = dentry->d_inode;
66085@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
66086 return 0;
66087 }
66088
66089+EXPORT_SYMBOL(btrfs_getattr);
66090+
66091+dev_t get_btrfs_dev_from_inode(struct inode *inode)
66092+{
66093+ return BTRFS_I(inode)->root->anon_super.s_dev;
66094+}
66095+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
66096+
66097 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
66098 struct inode *new_dir, struct dentry *new_dentry)
66099 {
66100@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
66101 .fsync = btrfs_sync_file,
66102 };
66103
66104-static struct extent_io_ops btrfs_extent_io_ops = {
66105+static const struct extent_io_ops btrfs_extent_io_ops = {
66106 .fill_delalloc = run_delalloc_range,
66107 .submit_bio_hook = btrfs_submit_bio_hook,
66108 .merge_bio_hook = btrfs_merge_bio_hook,
66109diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
66110index ab7ab53..94e0781 100644
66111--- a/fs/btrfs/relocation.c
66112+++ b/fs/btrfs/relocation.c
66113@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
66114 }
66115 spin_unlock(&rc->reloc_root_tree.lock);
66116
66117- BUG_ON((struct btrfs_root *)node->data != root);
66118+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
66119
66120 if (!del) {
66121 spin_lock(&rc->reloc_root_tree.lock);
66122diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
66123index a240b6f..4ce16ef 100644
66124--- a/fs/btrfs/sysfs.c
66125+++ b/fs/btrfs/sysfs.c
66126@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
66127 complete(&root->kobj_unregister);
66128 }
66129
66130-static struct sysfs_ops btrfs_super_attr_ops = {
66131+static const struct sysfs_ops btrfs_super_attr_ops = {
66132 .show = btrfs_super_attr_show,
66133 .store = btrfs_super_attr_store,
66134 };
66135
66136-static struct sysfs_ops btrfs_root_attr_ops = {
66137+static const struct sysfs_ops btrfs_root_attr_ops = {
66138 .show = btrfs_root_attr_show,
66139 .store = btrfs_root_attr_store,
66140 };
66141diff --git a/fs/buffer.c b/fs/buffer.c
66142index 6fa5302..395d9f6 100644
66143--- a/fs/buffer.c
66144+++ b/fs/buffer.c
66145@@ -25,6 +25,7 @@
66146 #include <linux/percpu.h>
66147 #include <linux/slab.h>
66148 #include <linux/capability.h>
66149+#include <linux/security.h>
66150 #include <linux/blkdev.h>
66151 #include <linux/file.h>
66152 #include <linux/quotaops.h>
66153diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
66154index 3797e00..ce776f6 100644
66155--- a/fs/cachefiles/bind.c
66156+++ b/fs/cachefiles/bind.c
66157@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
66158 args);
66159
66160 /* start by checking things over */
66161- ASSERT(cache->fstop_percent >= 0 &&
66162- cache->fstop_percent < cache->fcull_percent &&
66163+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
66164 cache->fcull_percent < cache->frun_percent &&
66165 cache->frun_percent < 100);
66166
66167- ASSERT(cache->bstop_percent >= 0 &&
66168- cache->bstop_percent < cache->bcull_percent &&
66169+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
66170 cache->bcull_percent < cache->brun_percent &&
66171 cache->brun_percent < 100);
66172
66173diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
66174index 4618516..bb30d01 100644
66175--- a/fs/cachefiles/daemon.c
66176+++ b/fs/cachefiles/daemon.c
66177@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
66178 if (test_bit(CACHEFILES_DEAD, &cache->flags))
66179 return -EIO;
66180
66181- if (datalen < 0 || datalen > PAGE_SIZE - 1)
66182+ if (datalen > PAGE_SIZE - 1)
66183 return -EOPNOTSUPP;
66184
66185 /* drag the command string into the kernel so we can parse it */
66186@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
66187 if (args[0] != '%' || args[1] != '\0')
66188 return -EINVAL;
66189
66190- if (fstop < 0 || fstop >= cache->fcull_percent)
66191+ if (fstop >= cache->fcull_percent)
66192 return cachefiles_daemon_range_error(cache, args);
66193
66194 cache->fstop_percent = fstop;
66195@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
66196 if (args[0] != '%' || args[1] != '\0')
66197 return -EINVAL;
66198
66199- if (bstop < 0 || bstop >= cache->bcull_percent)
66200+ if (bstop >= cache->bcull_percent)
66201 return cachefiles_daemon_range_error(cache, args);
66202
66203 cache->bstop_percent = bstop;
66204diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
66205index f7c255f..fcd61de 100644
66206--- a/fs/cachefiles/internal.h
66207+++ b/fs/cachefiles/internal.h
66208@@ -56,7 +56,7 @@ struct cachefiles_cache {
66209 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
66210 struct rb_root active_nodes; /* active nodes (can't be culled) */
66211 rwlock_t active_lock; /* lock for active_nodes */
66212- atomic_t gravecounter; /* graveyard uniquifier */
66213+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
66214 unsigned frun_percent; /* when to stop culling (% files) */
66215 unsigned fcull_percent; /* when to start culling (% files) */
66216 unsigned fstop_percent; /* when to stop allocating (% files) */
66217@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
66218 * proc.c
66219 */
66220 #ifdef CONFIG_CACHEFILES_HISTOGRAM
66221-extern atomic_t cachefiles_lookup_histogram[HZ];
66222-extern atomic_t cachefiles_mkdir_histogram[HZ];
66223-extern atomic_t cachefiles_create_histogram[HZ];
66224+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66225+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66226+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
66227
66228 extern int __init cachefiles_proc_init(void);
66229 extern void cachefiles_proc_cleanup(void);
66230 static inline
66231-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
66232+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
66233 {
66234 unsigned long jif = jiffies - start_jif;
66235 if (jif >= HZ)
66236 jif = HZ - 1;
66237- atomic_inc(&histogram[jif]);
66238+ atomic_inc_unchecked(&histogram[jif]);
66239 }
66240
66241 #else
66242diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
66243index 14ac480..a62766c 100644
66244--- a/fs/cachefiles/namei.c
66245+++ b/fs/cachefiles/namei.c
66246@@ -250,7 +250,7 @@ try_again:
66247 /* first step is to make up a grave dentry in the graveyard */
66248 sprintf(nbuffer, "%08x%08x",
66249 (uint32_t) get_seconds(),
66250- (uint32_t) atomic_inc_return(&cache->gravecounter));
66251+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
66252
66253 /* do the multiway lock magic */
66254 trap = lock_rename(cache->graveyard, dir);
66255diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
66256index eccd339..4c1d995 100644
66257--- a/fs/cachefiles/proc.c
66258+++ b/fs/cachefiles/proc.c
66259@@ -14,9 +14,9 @@
66260 #include <linux/seq_file.h>
66261 #include "internal.h"
66262
66263-atomic_t cachefiles_lookup_histogram[HZ];
66264-atomic_t cachefiles_mkdir_histogram[HZ];
66265-atomic_t cachefiles_create_histogram[HZ];
66266+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66267+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66268+atomic_unchecked_t cachefiles_create_histogram[HZ];
66269
66270 /*
66271 * display the latency histogram
66272@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
66273 return 0;
66274 default:
66275 index = (unsigned long) v - 3;
66276- x = atomic_read(&cachefiles_lookup_histogram[index]);
66277- y = atomic_read(&cachefiles_mkdir_histogram[index]);
66278- z = atomic_read(&cachefiles_create_histogram[index]);
66279+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
66280+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
66281+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
66282 if (x == 0 && y == 0 && z == 0)
66283 return 0;
66284
66285diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
66286index a6c8c6f..5cf8517 100644
66287--- a/fs/cachefiles/rdwr.c
66288+++ b/fs/cachefiles/rdwr.c
66289@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
66290 old_fs = get_fs();
66291 set_fs(KERNEL_DS);
66292 ret = file->f_op->write(
66293- file, (const void __user *) data, len, &pos);
66294+ file, (const void __force_user *) data, len, &pos);
66295 set_fs(old_fs);
66296 kunmap(page);
66297 if (ret != len)
66298diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
66299index 20692fb..0098fb7 100644
66300--- a/fs/cifs/asn1.c
66301+++ b/fs/cifs/asn1.c
66302@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
66303
66304 static int
66305 asn1_oid_decode(struct asn1_ctx *ctx,
66306+ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
66307+static int
66308+asn1_oid_decode(struct asn1_ctx *ctx,
66309 unsigned char *eoc, unsigned long **oid, unsigned int *len)
66310 {
66311 unsigned long subid;
66312diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
66313index 42cec2a..2aba466 100644
66314--- a/fs/cifs/cifs_debug.c
66315+++ b/fs/cifs/cifs_debug.c
66316@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
66317 tcon = list_entry(tmp3,
66318 struct cifsTconInfo,
66319 tcon_list);
66320- atomic_set(&tcon->num_smbs_sent, 0);
66321- atomic_set(&tcon->num_writes, 0);
66322- atomic_set(&tcon->num_reads, 0);
66323- atomic_set(&tcon->num_oplock_brks, 0);
66324- atomic_set(&tcon->num_opens, 0);
66325- atomic_set(&tcon->num_posixopens, 0);
66326- atomic_set(&tcon->num_posixmkdirs, 0);
66327- atomic_set(&tcon->num_closes, 0);
66328- atomic_set(&tcon->num_deletes, 0);
66329- atomic_set(&tcon->num_mkdirs, 0);
66330- atomic_set(&tcon->num_rmdirs, 0);
66331- atomic_set(&tcon->num_renames, 0);
66332- atomic_set(&tcon->num_t2renames, 0);
66333- atomic_set(&tcon->num_ffirst, 0);
66334- atomic_set(&tcon->num_fnext, 0);
66335- atomic_set(&tcon->num_fclose, 0);
66336- atomic_set(&tcon->num_hardlinks, 0);
66337- atomic_set(&tcon->num_symlinks, 0);
66338- atomic_set(&tcon->num_locks, 0);
66339+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
66340+ atomic_set_unchecked(&tcon->num_writes, 0);
66341+ atomic_set_unchecked(&tcon->num_reads, 0);
66342+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
66343+ atomic_set_unchecked(&tcon->num_opens, 0);
66344+ atomic_set_unchecked(&tcon->num_posixopens, 0);
66345+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
66346+ atomic_set_unchecked(&tcon->num_closes, 0);
66347+ atomic_set_unchecked(&tcon->num_deletes, 0);
66348+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
66349+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
66350+ atomic_set_unchecked(&tcon->num_renames, 0);
66351+ atomic_set_unchecked(&tcon->num_t2renames, 0);
66352+ atomic_set_unchecked(&tcon->num_ffirst, 0);
66353+ atomic_set_unchecked(&tcon->num_fnext, 0);
66354+ atomic_set_unchecked(&tcon->num_fclose, 0);
66355+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
66356+ atomic_set_unchecked(&tcon->num_symlinks, 0);
66357+ atomic_set_unchecked(&tcon->num_locks, 0);
66358 }
66359 }
66360 }
66361@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
66362 if (tcon->need_reconnect)
66363 seq_puts(m, "\tDISCONNECTED ");
66364 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
66365- atomic_read(&tcon->num_smbs_sent),
66366- atomic_read(&tcon->num_oplock_brks));
66367+ atomic_read_unchecked(&tcon->num_smbs_sent),
66368+ atomic_read_unchecked(&tcon->num_oplock_brks));
66369 seq_printf(m, "\nReads: %d Bytes: %lld",
66370- atomic_read(&tcon->num_reads),
66371+ atomic_read_unchecked(&tcon->num_reads),
66372 (long long)(tcon->bytes_read));
66373 seq_printf(m, "\nWrites: %d Bytes: %lld",
66374- atomic_read(&tcon->num_writes),
66375+ atomic_read_unchecked(&tcon->num_writes),
66376 (long long)(tcon->bytes_written));
66377 seq_printf(m, "\nFlushes: %d",
66378- atomic_read(&tcon->num_flushes));
66379+ atomic_read_unchecked(&tcon->num_flushes));
66380 seq_printf(m, "\nLocks: %d HardLinks: %d "
66381 "Symlinks: %d",
66382- atomic_read(&tcon->num_locks),
66383- atomic_read(&tcon->num_hardlinks),
66384- atomic_read(&tcon->num_symlinks));
66385+ atomic_read_unchecked(&tcon->num_locks),
66386+ atomic_read_unchecked(&tcon->num_hardlinks),
66387+ atomic_read_unchecked(&tcon->num_symlinks));
66388 seq_printf(m, "\nOpens: %d Closes: %d "
66389 "Deletes: %d",
66390- atomic_read(&tcon->num_opens),
66391- atomic_read(&tcon->num_closes),
66392- atomic_read(&tcon->num_deletes));
66393+ atomic_read_unchecked(&tcon->num_opens),
66394+ atomic_read_unchecked(&tcon->num_closes),
66395+ atomic_read_unchecked(&tcon->num_deletes));
66396 seq_printf(m, "\nPosix Opens: %d "
66397 "Posix Mkdirs: %d",
66398- atomic_read(&tcon->num_posixopens),
66399- atomic_read(&tcon->num_posixmkdirs));
66400+ atomic_read_unchecked(&tcon->num_posixopens),
66401+ atomic_read_unchecked(&tcon->num_posixmkdirs));
66402 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
66403- atomic_read(&tcon->num_mkdirs),
66404- atomic_read(&tcon->num_rmdirs));
66405+ atomic_read_unchecked(&tcon->num_mkdirs),
66406+ atomic_read_unchecked(&tcon->num_rmdirs));
66407 seq_printf(m, "\nRenames: %d T2 Renames %d",
66408- atomic_read(&tcon->num_renames),
66409- atomic_read(&tcon->num_t2renames));
66410+ atomic_read_unchecked(&tcon->num_renames),
66411+ atomic_read_unchecked(&tcon->num_t2renames));
66412 seq_printf(m, "\nFindFirst: %d FNext %d "
66413 "FClose %d",
66414- atomic_read(&tcon->num_ffirst),
66415- atomic_read(&tcon->num_fnext),
66416- atomic_read(&tcon->num_fclose));
66417+ atomic_read_unchecked(&tcon->num_ffirst),
66418+ atomic_read_unchecked(&tcon->num_fnext),
66419+ atomic_read_unchecked(&tcon->num_fclose));
66420 }
66421 }
66422 }
66423diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
66424index 1445407..68cb0dc 100644
66425--- a/fs/cifs/cifsfs.c
66426+++ b/fs/cifs/cifsfs.c
66427@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
66428 cifs_req_cachep = kmem_cache_create("cifs_request",
66429 CIFSMaxBufSize +
66430 MAX_CIFS_HDR_SIZE, 0,
66431- SLAB_HWCACHE_ALIGN, NULL);
66432+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
66433 if (cifs_req_cachep == NULL)
66434 return -ENOMEM;
66435
66436@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
66437 efficient to alloc 1 per page off the slab compared to 17K (5page)
66438 alloc of large cifs buffers even when page debugging is on */
66439 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
66440- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
66441+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
66442 NULL);
66443 if (cifs_sm_req_cachep == NULL) {
66444 mempool_destroy(cifs_req_poolp);
66445@@ -991,8 +991,8 @@ init_cifs(void)
66446 atomic_set(&bufAllocCount, 0);
66447 atomic_set(&smBufAllocCount, 0);
66448 #ifdef CONFIG_CIFS_STATS2
66449- atomic_set(&totBufAllocCount, 0);
66450- atomic_set(&totSmBufAllocCount, 0);
66451+ atomic_set_unchecked(&totBufAllocCount, 0);
66452+ atomic_set_unchecked(&totSmBufAllocCount, 0);
66453 #endif /* CONFIG_CIFS_STATS2 */
66454
66455 atomic_set(&midCount, 0);
66456diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
66457index e29581e..1c22bab 100644
66458--- a/fs/cifs/cifsglob.h
66459+++ b/fs/cifs/cifsglob.h
66460@@ -252,28 +252,28 @@ struct cifsTconInfo {
66461 __u16 Flags; /* optional support bits */
66462 enum statusEnum tidStatus;
66463 #ifdef CONFIG_CIFS_STATS
66464- atomic_t num_smbs_sent;
66465- atomic_t num_writes;
66466- atomic_t num_reads;
66467- atomic_t num_flushes;
66468- atomic_t num_oplock_brks;
66469- atomic_t num_opens;
66470- atomic_t num_closes;
66471- atomic_t num_deletes;
66472- atomic_t num_mkdirs;
66473- atomic_t num_posixopens;
66474- atomic_t num_posixmkdirs;
66475- atomic_t num_rmdirs;
66476- atomic_t num_renames;
66477- atomic_t num_t2renames;
66478- atomic_t num_ffirst;
66479- atomic_t num_fnext;
66480- atomic_t num_fclose;
66481- atomic_t num_hardlinks;
66482- atomic_t num_symlinks;
66483- atomic_t num_locks;
66484- atomic_t num_acl_get;
66485- atomic_t num_acl_set;
66486+ atomic_unchecked_t num_smbs_sent;
66487+ atomic_unchecked_t num_writes;
66488+ atomic_unchecked_t num_reads;
66489+ atomic_unchecked_t num_flushes;
66490+ atomic_unchecked_t num_oplock_brks;
66491+ atomic_unchecked_t num_opens;
66492+ atomic_unchecked_t num_closes;
66493+ atomic_unchecked_t num_deletes;
66494+ atomic_unchecked_t num_mkdirs;
66495+ atomic_unchecked_t num_posixopens;
66496+ atomic_unchecked_t num_posixmkdirs;
66497+ atomic_unchecked_t num_rmdirs;
66498+ atomic_unchecked_t num_renames;
66499+ atomic_unchecked_t num_t2renames;
66500+ atomic_unchecked_t num_ffirst;
66501+ atomic_unchecked_t num_fnext;
66502+ atomic_unchecked_t num_fclose;
66503+ atomic_unchecked_t num_hardlinks;
66504+ atomic_unchecked_t num_symlinks;
66505+ atomic_unchecked_t num_locks;
66506+ atomic_unchecked_t num_acl_get;
66507+ atomic_unchecked_t num_acl_set;
66508 #ifdef CONFIG_CIFS_STATS2
66509 unsigned long long time_writes;
66510 unsigned long long time_reads;
66511@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
66512 }
66513
66514 #ifdef CONFIG_CIFS_STATS
66515-#define cifs_stats_inc atomic_inc
66516+#define cifs_stats_inc atomic_inc_unchecked
66517
66518 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
66519 unsigned int bytes)
66520@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
66521 /* Various Debug counters */
66522 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
66523 #ifdef CONFIG_CIFS_STATS2
66524-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
66525-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
66526+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
66527+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
66528 #endif
66529 GLOBAL_EXTERN atomic_t smBufAllocCount;
66530 GLOBAL_EXTERN atomic_t midCount;
66531diff --git a/fs/cifs/link.c b/fs/cifs/link.c
66532index fc1e048..28b3441 100644
66533--- a/fs/cifs/link.c
66534+++ b/fs/cifs/link.c
66535@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
66536
66537 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
66538 {
66539- char *p = nd_get_link(nd);
66540+ const char *p = nd_get_link(nd);
66541 if (!IS_ERR(p))
66542 kfree(p);
66543 }
66544diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
66545index 95b82e8..12a538d 100644
66546--- a/fs/cifs/misc.c
66547+++ b/fs/cifs/misc.c
66548@@ -155,7 +155,7 @@ cifs_buf_get(void)
66549 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
66550 atomic_inc(&bufAllocCount);
66551 #ifdef CONFIG_CIFS_STATS2
66552- atomic_inc(&totBufAllocCount);
66553+ atomic_inc_unchecked(&totBufAllocCount);
66554 #endif /* CONFIG_CIFS_STATS2 */
66555 }
66556
66557@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
66558 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
66559 atomic_inc(&smBufAllocCount);
66560 #ifdef CONFIG_CIFS_STATS2
66561- atomic_inc(&totSmBufAllocCount);
66562+ atomic_inc_unchecked(&totSmBufAllocCount);
66563 #endif /* CONFIG_CIFS_STATS2 */
66564
66565 }
66566diff --git a/fs/coda/cache.c b/fs/coda/cache.c
66567index a5bf577..6d19845 100644
66568--- a/fs/coda/cache.c
66569+++ b/fs/coda/cache.c
66570@@ -24,14 +24,14 @@
66571 #include <linux/coda_fs_i.h>
66572 #include <linux/coda_cache.h>
66573
66574-static atomic_t permission_epoch = ATOMIC_INIT(0);
66575+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
66576
66577 /* replace or extend an acl cache hit */
66578 void coda_cache_enter(struct inode *inode, int mask)
66579 {
66580 struct coda_inode_info *cii = ITOC(inode);
66581
66582- cii->c_cached_epoch = atomic_read(&permission_epoch);
66583+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
66584 if (cii->c_uid != current_fsuid()) {
66585 cii->c_uid = current_fsuid();
66586 cii->c_cached_perm = mask;
66587@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
66588 void coda_cache_clear_inode(struct inode *inode)
66589 {
66590 struct coda_inode_info *cii = ITOC(inode);
66591- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
66592+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
66593 }
66594
66595 /* remove all acl caches */
66596 void coda_cache_clear_all(struct super_block *sb)
66597 {
66598- atomic_inc(&permission_epoch);
66599+ atomic_inc_unchecked(&permission_epoch);
66600 }
66601
66602
66603@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
66604
66605 hit = (mask & cii->c_cached_perm) == mask &&
66606 cii->c_uid == current_fsuid() &&
66607- cii->c_cached_epoch == atomic_read(&permission_epoch);
66608+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
66609
66610 return hit;
66611 }
66612diff --git a/fs/compat.c b/fs/compat.c
66613index d1e2411..9a958d2 100644
66614--- a/fs/compat.c
66615+++ b/fs/compat.c
66616@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
66617 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
66618 {
66619 compat_ino_t ino = stat->ino;
66620- typeof(ubuf->st_uid) uid = 0;
66621- typeof(ubuf->st_gid) gid = 0;
66622+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
66623+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
66624 int err;
66625
66626 SET_UID(uid, stat->uid);
66627@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
66628
66629 set_fs(KERNEL_DS);
66630 /* The __user pointer cast is valid because of the set_fs() */
66631- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
66632+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
66633 set_fs(oldfs);
66634 /* truncating is ok because it's a user address */
66635 if (!ret)
66636@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
66637
66638 struct compat_readdir_callback {
66639 struct compat_old_linux_dirent __user *dirent;
66640+ struct file * file;
66641 int result;
66642 };
66643
66644@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
66645 buf->result = -EOVERFLOW;
66646 return -EOVERFLOW;
66647 }
66648+
66649+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66650+ return 0;
66651+
66652 buf->result++;
66653 dirent = buf->dirent;
66654 if (!access_ok(VERIFY_WRITE, dirent,
66655@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
66656
66657 buf.result = 0;
66658 buf.dirent = dirent;
66659+ buf.file = file;
66660
66661 error = vfs_readdir(file, compat_fillonedir, &buf);
66662 if (buf.result)
66663@@ -899,6 +905,7 @@ struct compat_linux_dirent {
66664 struct compat_getdents_callback {
66665 struct compat_linux_dirent __user *current_dir;
66666 struct compat_linux_dirent __user *previous;
66667+ struct file * file;
66668 int count;
66669 int error;
66670 };
66671@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
66672 buf->error = -EOVERFLOW;
66673 return -EOVERFLOW;
66674 }
66675+
66676+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66677+ return 0;
66678+
66679 dirent = buf->previous;
66680 if (dirent) {
66681 if (__put_user(offset, &dirent->d_off))
66682@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
66683 buf.previous = NULL;
66684 buf.count = count;
66685 buf.error = 0;
66686+ buf.file = file;
66687
66688 error = vfs_readdir(file, compat_filldir, &buf);
66689 if (error >= 0)
66690@@ -987,6 +999,7 @@ out:
66691 struct compat_getdents_callback64 {
66692 struct linux_dirent64 __user *current_dir;
66693 struct linux_dirent64 __user *previous;
66694+ struct file * file;
66695 int count;
66696 int error;
66697 };
66698@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
66699 buf->error = -EINVAL; /* only used if we fail.. */
66700 if (reclen > buf->count)
66701 return -EINVAL;
66702+
66703+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66704+ return 0;
66705+
66706 dirent = buf->previous;
66707
66708 if (dirent) {
66709@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
66710 buf.previous = NULL;
66711 buf.count = count;
66712 buf.error = 0;
66713+ buf.file = file;
66714
66715 error = vfs_readdir(file, compat_filldir64, &buf);
66716 if (error >= 0)
66717 error = buf.error;
66718 lastdirent = buf.previous;
66719 if (lastdirent) {
66720- typeof(lastdirent->d_off) d_off = file->f_pos;
66721+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
66722 if (__put_user_unaligned(d_off, &lastdirent->d_off))
66723 error = -EFAULT;
66724 else
66725@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
66726 * verify all the pointers
66727 */
66728 ret = -EINVAL;
66729- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
66730+ if (nr_segs > UIO_MAXIOV)
66731 goto out;
66732 if (!file->f_op)
66733 goto out;
66734@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
66735 compat_uptr_t __user *envp,
66736 struct pt_regs * regs)
66737 {
66738+#ifdef CONFIG_GRKERNSEC
66739+ struct file *old_exec_file;
66740+ struct acl_subject_label *old_acl;
66741+ struct rlimit old_rlim[RLIM_NLIMITS];
66742+#endif
66743 struct linux_binprm *bprm;
66744 struct file *file;
66745 struct files_struct *displaced;
66746 bool clear_in_exec;
66747 int retval;
66748+ const struct cred *cred = current_cred();
66749+
66750+ /*
66751+ * We move the actual failure in case of RLIMIT_NPROC excess from
66752+ * set*uid() to execve() because too many poorly written programs
66753+ * don't check setuid() return code. Here we additionally recheck
66754+ * whether NPROC limit is still exceeded.
66755+ */
66756+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
66757+
66758+ if ((current->flags & PF_NPROC_EXCEEDED) &&
66759+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66760+ retval = -EAGAIN;
66761+ goto out_ret;
66762+ }
66763+
66764+ /* We're below the limit (still or again), so we don't want to make
66765+ * further execve() calls fail. */
66766+ current->flags &= ~PF_NPROC_EXCEEDED;
66767
66768 retval = unshare_files(&displaced);
66769 if (retval)
66770@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
66771 if (IS_ERR(file))
66772 goto out_unmark;
66773
66774+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
66775+ retval = -EPERM;
66776+ goto out_file;
66777+ }
66778+
66779 sched_exec();
66780
66781 bprm->file = file;
66782 bprm->filename = filename;
66783 bprm->interp = filename;
66784
66785+ if (gr_process_user_ban()) {
66786+ retval = -EPERM;
66787+ goto out_file;
66788+ }
66789+
66790+ retval = -EACCES;
66791+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
66792+ goto out_file;
66793+
66794 retval = bprm_mm_init(bprm);
66795 if (retval)
66796 goto out_file;
66797@@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename,
66798 if (retval < 0)
66799 goto out;
66800
66801+#ifdef CONFIG_GRKERNSEC
66802+ old_acl = current->acl;
66803+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
66804+ old_exec_file = current->exec_file;
66805+ get_file(file);
66806+ current->exec_file = file;
66807+#endif
66808+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66809+ /* limit suid stack to 8MB
66810+ we saved the old limits above and will restore them if this exec fails
66811+ */
66812+ if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid()))
66813+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
66814+#endif
66815+
66816+ if (!gr_tpe_allow(file)) {
66817+ retval = -EACCES;
66818+ goto out_fail;
66819+ }
66820+
66821+ if (gr_check_crash_exec(file)) {
66822+ retval = -EACCES;
66823+ goto out_fail;
66824+ }
66825+
66826+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
66827+ bprm->unsafe);
66828+ if (retval < 0)
66829+ goto out_fail;
66830+
66831 retval = copy_strings_kernel(1, &bprm->filename, bprm);
66832 if (retval < 0)
66833- goto out;
66834+ goto out_fail;
66835
66836 bprm->exec = bprm->p;
66837 retval = compat_copy_strings(bprm->envc, envp, bprm);
66838 if (retval < 0)
66839- goto out;
66840+ goto out_fail;
66841
66842 retval = compat_copy_strings(bprm->argc, argv, bprm);
66843 if (retval < 0)
66844- goto out;
66845+ goto out_fail;
66846+
66847+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
66848+
66849+ gr_handle_exec_args_compat(bprm, argv);
66850
66851 retval = search_binary_handler(bprm, regs);
66852 if (retval < 0)
66853- goto out;
66854+ goto out_fail;
66855+#ifdef CONFIG_GRKERNSEC
66856+ if (old_exec_file)
66857+ fput(old_exec_file);
66858+#endif
66859
66860 /* execve succeeded */
66861+ increment_exec_counter();
66862 current->fs->in_exec = 0;
66863 current->in_execve = 0;
66864 acct_update_integrals(current);
66865@@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename,
66866 put_files_struct(displaced);
66867 return retval;
66868
66869+out_fail:
66870+#ifdef CONFIG_GRKERNSEC
66871+ current->acl = old_acl;
66872+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
66873+ fput(current->exec_file);
66874+ current->exec_file = old_exec_file;
66875+#endif
66876+
66877 out:
66878 if (bprm->mm) {
66879 acct_arg_size(bprm, 0);
66880@@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
66881 struct fdtable *fdt;
66882 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
66883
66884+ pax_track_stack();
66885+
66886 if (n < 0)
66887 goto out_nofds;
66888
66889@@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
66890 oldfs = get_fs();
66891 set_fs(KERNEL_DS);
66892 /* The __user pointer casts are valid because of the set_fs() */
66893- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
66894+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
66895 set_fs(oldfs);
66896
66897 if (err)
66898diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
66899index 0adced2..bbb1b0d 100644
66900--- a/fs/compat_binfmt_elf.c
66901+++ b/fs/compat_binfmt_elf.c
66902@@ -29,10 +29,12 @@
66903 #undef elfhdr
66904 #undef elf_phdr
66905 #undef elf_note
66906+#undef elf_dyn
66907 #undef elf_addr_t
66908 #define elfhdr elf32_hdr
66909 #define elf_phdr elf32_phdr
66910 #define elf_note elf32_note
66911+#define elf_dyn Elf32_Dyn
66912 #define elf_addr_t Elf32_Addr
66913
66914 /*
66915diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
66916index d84e705..d8c364c 100644
66917--- a/fs/compat_ioctl.c
66918+++ b/fs/compat_ioctl.c
66919@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
66920 up = (struct compat_video_spu_palette __user *) arg;
66921 err = get_user(palp, &up->palette);
66922 err |= get_user(length, &up->length);
66923+ if (err)
66924+ return -EFAULT;
66925
66926 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
66927 err = put_user(compat_ptr(palp), &up_native->palette);
66928@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
66929 return -EFAULT;
66930 if (__get_user(udata, &ss32->iomem_base))
66931 return -EFAULT;
66932- ss.iomem_base = compat_ptr(udata);
66933+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
66934 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
66935 __get_user(ss.port_high, &ss32->port_high))
66936 return -EFAULT;
66937@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
66938 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
66939 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
66940 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
66941- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66942+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66943 return -EFAULT;
66944
66945 return ioctl_preallocate(file, p);
66946diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
66947index 8e48b52..f01ed91 100644
66948--- a/fs/configfs/dir.c
66949+++ b/fs/configfs/dir.c
66950@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66951 }
66952 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
66953 struct configfs_dirent *next;
66954- const char * name;
66955+ const unsigned char * name;
66956+ char d_name[sizeof(next->s_dentry->d_iname)];
66957 int len;
66958
66959 next = list_entry(p, struct configfs_dirent,
66960@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66961 continue;
66962
66963 name = configfs_get_name(next);
66964- len = strlen(name);
66965+ if (next->s_dentry && name == next->s_dentry->d_iname) {
66966+ len = next->s_dentry->d_name.len;
66967+ memcpy(d_name, name, len);
66968+ name = d_name;
66969+ } else
66970+ len = strlen(name);
66971 if (next->s_dentry)
66972 ino = next->s_dentry->d_inode->i_ino;
66973 else
66974diff --git a/fs/dcache.c b/fs/dcache.c
66975index 44c0aea..2529092 100644
66976--- a/fs/dcache.c
66977+++ b/fs/dcache.c
66978@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
66979
66980 static struct kmem_cache *dentry_cache __read_mostly;
66981
66982-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66983-
66984 /*
66985 * This is the single most critical data structure when it comes
66986 * to the dcache: the hashtable for lookups. Somebody should try
66987@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
66988 mempages -= reserve;
66989
66990 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
66991- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
66992+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
66993
66994 dcache_init();
66995 inode_init();
66996diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
66997index 39c6ee8..dcee0f1 100644
66998--- a/fs/debugfs/inode.c
66999+++ b/fs/debugfs/inode.c
67000@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
67001 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
67002 {
67003 return debugfs_create_file(name,
67004+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67005+ S_IFDIR | S_IRWXU,
67006+#else
67007 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
67008+#endif
67009 parent, NULL, NULL);
67010 }
67011 EXPORT_SYMBOL_GPL(debugfs_create_dir);
67012diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
67013index c010ecf..a8d8c59 100644
67014--- a/fs/dlm/lockspace.c
67015+++ b/fs/dlm/lockspace.c
67016@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
67017 kfree(ls);
67018 }
67019
67020-static struct sysfs_ops dlm_attr_ops = {
67021+static const struct sysfs_ops dlm_attr_ops = {
67022 .show = dlm_attr_show,
67023 .store = dlm_attr_store,
67024 };
67025diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
67026index 7a5f1ac..62fa913 100644
67027--- a/fs/ecryptfs/crypto.c
67028+++ b/fs/ecryptfs/crypto.c
67029@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
67030 rc);
67031 goto out;
67032 }
67033- if (unlikely(ecryptfs_verbosity > 0)) {
67034- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
67035- "with iv:\n");
67036- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67037- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67038- "encryption:\n");
67039- ecryptfs_dump_hex((char *)
67040- (page_address(page)
67041- + (extent_offset * crypt_stat->extent_size)),
67042- 8);
67043- }
67044 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
67045 page, (extent_offset
67046 * crypt_stat->extent_size),
67047@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
67048 goto out;
67049 }
67050 rc = 0;
67051- if (unlikely(ecryptfs_verbosity > 0)) {
67052- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
67053- "rc = [%d]\n", (extent_base + extent_offset),
67054- rc);
67055- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67056- "encryption:\n");
67057- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
67058- }
67059 out:
67060 return rc;
67061 }
67062@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67063 rc);
67064 goto out;
67065 }
67066- if (unlikely(ecryptfs_verbosity > 0)) {
67067- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
67068- "with iv:\n");
67069- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67070- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67071- "decryption:\n");
67072- ecryptfs_dump_hex((char *)
67073- (page_address(enc_extent_page)
67074- + (extent_offset * crypt_stat->extent_size)),
67075- 8);
67076- }
67077 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
67078 (extent_offset
67079 * crypt_stat->extent_size),
67080@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67081 goto out;
67082 }
67083 rc = 0;
67084- if (unlikely(ecryptfs_verbosity > 0)) {
67085- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
67086- "rc = [%d]\n", (extent_base + extent_offset),
67087- rc);
67088- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67089- "decryption:\n");
67090- ecryptfs_dump_hex((char *)(page_address(page)
67091- + (extent_offset
67092- * crypt_stat->extent_size)), 8);
67093- }
67094 out:
67095 return rc;
67096 }
67097@@ -1455,6 +1415,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
67098 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
67099 }
67100
67101+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
67102+{
67103+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
67104+ struct ecryptfs_crypt_stat *crypt_stat;
67105+ u64 file_size;
67106+
67107+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
67108+ mount_crypt_stat =
67109+ &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
67110+ if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
67111+ file_size = i_size_read(ecryptfs_inode_to_lower(inode));
67112+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
67113+ file_size += crypt_stat->num_header_bytes_at_front;
67114+ } else
67115+ file_size = get_unaligned_be64(page_virt);
67116+ i_size_write(inode, (loff_t)file_size);
67117+ crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
67118+}
67119+
67120 /**
67121 * ecryptfs_read_headers_virt
67122 * @page_virt: The virtual address into which to read the headers
67123@@ -1485,6 +1464,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
67124 rc = -EINVAL;
67125 goto out;
67126 }
67127+ if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
67128+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
67129 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
67130 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
67131 &bytes_read);
67132diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
67133index 542f625..9685315 100644
67134--- a/fs/ecryptfs/ecryptfs_kernel.h
67135+++ b/fs/ecryptfs/ecryptfs_kernel.h
67136@@ -270,6 +270,7 @@ struct ecryptfs_crypt_stat {
67137 #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
67138 #define ECRYPTFS_ENCFN_USE_FEK 0x00002000
67139 #define ECRYPTFS_UNLINK_SIGS 0x00004000
67140+#define ECRYPTFS_I_SIZE_INITIALIZED 0x00008000
67141 u32 flags;
67142 unsigned int file_version;
67143 size_t iv_bytes;
67144@@ -619,6 +620,7 @@ struct ecryptfs_open_req {
67145 int ecryptfs_interpose(struct dentry *hidden_dentry,
67146 struct dentry *this_dentry, struct super_block *sb,
67147 u32 flags);
67148+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
67149 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
67150 struct dentry *lower_dentry,
67151 struct inode *ecryptfs_dir_inode,
67152diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
67153index 3015389..49129f4 100644
67154--- a/fs/ecryptfs/file.c
67155+++ b/fs/ecryptfs/file.c
67156@@ -237,7 +237,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
67157 goto out_free;
67158 }
67159 rc = 0;
67160- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
67161+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
67162+ | ECRYPTFS_ENCRYPTED);
67163 mutex_unlock(&crypt_stat->cs_mutex);
67164 goto out;
67165 }
67166@@ -347,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
67167 #ifdef CONFIG_COMPAT
67168 .compat_ioctl = ecryptfs_compat_ioctl,
67169 #endif
67170- .mmap = generic_file_mmap,
67171 .open = ecryptfs_open,
67172 .flush = ecryptfs_flush,
67173 .release = ecryptfs_release,
67174diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
67175index 4434e8f..fa05803 100644
67176--- a/fs/ecryptfs/inode.c
67177+++ b/fs/ecryptfs/inode.c
67178@@ -256,10 +256,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
67179 struct dentry *lower_dir_dentry;
67180 struct vfsmount *lower_mnt;
67181 struct inode *lower_inode;
67182- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
67183 struct ecryptfs_crypt_stat *crypt_stat;
67184 char *page_virt = NULL;
67185- u64 file_size;
67186 int rc = 0;
67187
67188 lower_dir_dentry = lower_dentry->d_parent;
67189@@ -334,18 +332,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
67190 }
67191 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
67192 }
67193- mount_crypt_stat = &ecryptfs_superblock_to_private(
67194- ecryptfs_dentry->d_sb)->mount_crypt_stat;
67195- if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
67196- if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
67197- file_size = (crypt_stat->num_header_bytes_at_front
67198- + i_size_read(lower_dentry->d_inode));
67199- else
67200- file_size = i_size_read(lower_dentry->d_inode);
67201- } else {
67202- file_size = get_unaligned_be64(page_virt);
67203- }
67204- i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
67205+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
67206 out_free_kmem:
67207 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
67208 goto out;
67209@@ -660,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
67210 old_fs = get_fs();
67211 set_fs(get_ds());
67212 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
67213- (char __user *)lower_buf,
67214+ (char __force_user *)lower_buf,
67215 lower_bufsiz);
67216 set_fs(old_fs);
67217 if (rc < 0)
67218@@ -706,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
67219 }
67220 old_fs = get_fs();
67221 set_fs(get_ds());
67222- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
67223+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
67224 set_fs(old_fs);
67225 if (rc < 0)
67226 goto out_free;
67227@@ -964,7 +951,8 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
67228 goto out;
67229 }
67230 rc = 0;
67231- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
67232+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
67233+ | ECRYPTFS_ENCRYPTED);
67234 }
67235 }
67236 mutex_unlock(&crypt_stat->cs_mutex);
67237diff --git a/fs/exec.c b/fs/exec.c
67238index 86fafc6..6a109b9 100644
67239--- a/fs/exec.c
67240+++ b/fs/exec.c
67241@@ -56,12 +56,28 @@
67242 #include <linux/fsnotify.h>
67243 #include <linux/fs_struct.h>
67244 #include <linux/pipe_fs_i.h>
67245+#include <linux/random.h>
67246+#include <linux/seq_file.h>
67247+
67248+#ifdef CONFIG_PAX_REFCOUNT
67249+#include <linux/kallsyms.h>
67250+#include <linux/kdebug.h>
67251+#endif
67252
67253 #include <asm/uaccess.h>
67254 #include <asm/mmu_context.h>
67255 #include <asm/tlb.h>
67256 #include "internal.h"
67257
67258+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
67259+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
67260+#endif
67261+
67262+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
67263+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
67264+EXPORT_SYMBOL(pax_set_initial_flags_func);
67265+#endif
67266+
67267 int core_uses_pid;
67268 char core_pattern[CORENAME_MAX_SIZE] = "core";
67269 unsigned int core_pipe_limit;
67270@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67271 int write)
67272 {
67273 struct page *page;
67274- int ret;
67275
67276-#ifdef CONFIG_STACK_GROWSUP
67277- if (write) {
67278- ret = expand_stack_downwards(bprm->vma, pos);
67279- if (ret < 0)
67280- return NULL;
67281- }
67282-#endif
67283- ret = get_user_pages(current, bprm->mm, pos,
67284- 1, write, 1, &page, NULL);
67285- if (ret <= 0)
67286+ if (0 > expand_stack_downwards(bprm->vma, pos))
67287+ return NULL;
67288+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
67289 return NULL;
67290
67291 if (write) {
67292@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67293 if (size <= ARG_MAX)
67294 return page;
67295
67296+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67297+ // only allow 512KB for argv+env on suid/sgid binaries
67298+ // to prevent easy ASLR exhaustion
67299+ if (((bprm->cred->euid != current_euid()) ||
67300+ (bprm->cred->egid != current_egid())) &&
67301+ (size > (512 * 1024))) {
67302+ put_page(page);
67303+ return NULL;
67304+ }
67305+#endif
67306+
67307 /*
67308 * Limit to 1/4-th the stack size for the argv+env strings.
67309 * This ensures that:
67310@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67311 vma->vm_end = STACK_TOP_MAX;
67312 vma->vm_start = vma->vm_end - PAGE_SIZE;
67313 vma->vm_flags = VM_STACK_FLAGS;
67314+
67315+#ifdef CONFIG_PAX_SEGMEXEC
67316+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
67317+#endif
67318+
67319 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67320
67321 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
67322@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67323 mm->stack_vm = mm->total_vm = 1;
67324 up_write(&mm->mmap_sem);
67325 bprm->p = vma->vm_end - sizeof(void *);
67326+
67327+#ifdef CONFIG_PAX_RANDUSTACK
67328+ if (randomize_va_space)
67329+ bprm->p ^= random32() & ~PAGE_MASK;
67330+#endif
67331+
67332 return 0;
67333 err:
67334 up_write(&mm->mmap_sem);
67335@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
67336 int r;
67337 mm_segment_t oldfs = get_fs();
67338 set_fs(KERNEL_DS);
67339- r = copy_strings(argc, (char __user * __user *)argv, bprm);
67340+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
67341 set_fs(oldfs);
67342 return r;
67343 }
67344@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67345 unsigned long new_end = old_end - shift;
67346 struct mmu_gather *tlb;
67347
67348- BUG_ON(new_start > new_end);
67349+ if (new_start >= new_end || new_start < mmap_min_addr)
67350+ return -ENOMEM;
67351
67352 /*
67353 * ensure there are no vmas between where we want to go
67354@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67355 if (vma != find_vma(mm, new_start))
67356 return -EFAULT;
67357
67358+#ifdef CONFIG_PAX_SEGMEXEC
67359+ BUG_ON(pax_find_mirror_vma(vma));
67360+#endif
67361+
67362 /*
67363 * cover the whole range: [new_start, old_end)
67364 */
67365@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
67366 stack_top = arch_align_stack(stack_top);
67367 stack_top = PAGE_ALIGN(stack_top);
67368
67369- if (unlikely(stack_top < mmap_min_addr) ||
67370- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
67371- return -ENOMEM;
67372-
67373 stack_shift = vma->vm_end - stack_top;
67374
67375 bprm->p -= stack_shift;
67376@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
67377 bprm->exec -= stack_shift;
67378
67379 down_write(&mm->mmap_sem);
67380+
67381+ /* Move stack pages down in memory. */
67382+ if (stack_shift) {
67383+ ret = shift_arg_pages(vma, stack_shift);
67384+ if (ret)
67385+ goto out_unlock;
67386+ }
67387+
67388 vm_flags = VM_STACK_FLAGS;
67389
67390 /*
67391@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
67392 vm_flags &= ~VM_EXEC;
67393 vm_flags |= mm->def_flags;
67394
67395+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67396+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
67397+ vm_flags &= ~VM_EXEC;
67398+
67399+#ifdef CONFIG_PAX_MPROTECT
67400+ if (mm->pax_flags & MF_PAX_MPROTECT)
67401+ vm_flags &= ~VM_MAYEXEC;
67402+#endif
67403+
67404+ }
67405+#endif
67406+
67407 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
67408 vm_flags);
67409 if (ret)
67410 goto out_unlock;
67411 BUG_ON(prev != vma);
67412
67413- /* Move stack pages down in memory. */
67414- if (stack_shift) {
67415- ret = shift_arg_pages(vma, stack_shift);
67416- if (ret)
67417- goto out_unlock;
67418- }
67419-
67420 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
67421 stack_size = vma->vm_end - vma->vm_start;
67422 /*
67423@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
67424 old_fs = get_fs();
67425 set_fs(get_ds());
67426 /* The cast to a user pointer is valid due to the set_fs() */
67427- result = vfs_read(file, (void __user *)addr, count, &pos);
67428+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
67429 set_fs(old_fs);
67430 return result;
67431 }
67432@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
67433 perf_event_comm(tsk);
67434 }
67435
67436+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
67437+{
67438+ int i, ch;
67439+
67440+ /* Copies the binary name from after last slash */
67441+ for (i = 0; (ch = *(fn++)) != '\0';) {
67442+ if (ch == '/')
67443+ i = 0; /* overwrite what we wrote */
67444+ else
67445+ if (i < len - 1)
67446+ tcomm[i++] = ch;
67447+ }
67448+ tcomm[i] = '\0';
67449+}
67450+
67451 int flush_old_exec(struct linux_binprm * bprm)
67452 {
67453 int retval;
67454@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
67455
67456 set_mm_exe_file(bprm->mm, bprm->file);
67457
67458+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
67459 /*
67460 * Release all of the old mmap stuff
67461 */
67462@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
67463
67464 void setup_new_exec(struct linux_binprm * bprm)
67465 {
67466- int i, ch;
67467- char * name;
67468- char tcomm[sizeof(current->comm)];
67469-
67470 arch_pick_mmap_layout(current->mm);
67471
67472 /* This is the point of no return */
67473@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
67474 else
67475 set_dumpable(current->mm, suid_dumpable);
67476
67477- name = bprm->filename;
67478-
67479- /* Copies the binary name from after last slash */
67480- for (i=0; (ch = *(name++)) != '\0';) {
67481- if (ch == '/')
67482- i = 0; /* overwrite what we wrote */
67483- else
67484- if (i < (sizeof(tcomm) - 1))
67485- tcomm[i++] = ch;
67486- }
67487- tcomm[i] = '\0';
67488- set_task_comm(current, tcomm);
67489+ set_task_comm(current, bprm->tcomm);
67490
67491 /* Set the new mm task size. We have to do that late because it may
67492 * depend on TIF_32BIT which is only updated in flush_thread() on
67493@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
67494 }
67495 rcu_read_unlock();
67496
67497- if (p->fs->users > n_fs) {
67498+ if (atomic_read(&p->fs->users) > n_fs) {
67499 bprm->unsafe |= LSM_UNSAFE_SHARE;
67500 } else {
67501 res = -EAGAIN;
67502@@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
67503
67504 EXPORT_SYMBOL(search_binary_handler);
67505
67506+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67507+DEFINE_PER_CPU(u64, exec_counter);
67508+static int __init init_exec_counters(void)
67509+{
67510+ unsigned int cpu;
67511+
67512+ for_each_possible_cpu(cpu) {
67513+ per_cpu(exec_counter, cpu) = (u64)cpu;
67514+ }
67515+
67516+ return 0;
67517+}
67518+early_initcall(init_exec_counters);
67519+#endif
67520+
67521 /*
67522 * sys_execve() executes a new program.
67523 */
67524@@ -1347,11 +1407,35 @@ int do_execve(char * filename,
67525 char __user *__user *envp,
67526 struct pt_regs * regs)
67527 {
67528+#ifdef CONFIG_GRKERNSEC
67529+ struct file *old_exec_file;
67530+ struct acl_subject_label *old_acl;
67531+ struct rlimit old_rlim[RLIM_NLIMITS];
67532+#endif
67533 struct linux_binprm *bprm;
67534 struct file *file;
67535 struct files_struct *displaced;
67536 bool clear_in_exec;
67537 int retval;
67538+ const struct cred *cred = current_cred();
67539+
67540+ /*
67541+ * We move the actual failure in case of RLIMIT_NPROC excess from
67542+ * set*uid() to execve() because too many poorly written programs
67543+ * don't check setuid() return code. Here we additionally recheck
67544+ * whether NPROC limit is still exceeded.
67545+ */
67546+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
67547+
67548+ if ((current->flags & PF_NPROC_EXCEEDED) &&
67549+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
67550+ retval = -EAGAIN;
67551+ goto out_ret;
67552+ }
67553+
67554+ /* We're below the limit (still or again), so we don't want to make
67555+ * further execve() calls fail. */
67556+ current->flags &= ~PF_NPROC_EXCEEDED;
67557
67558 retval = unshare_files(&displaced);
67559 if (retval)
67560@@ -1377,12 +1461,27 @@ int do_execve(char * filename,
67561 if (IS_ERR(file))
67562 goto out_unmark;
67563
67564+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
67565+ retval = -EPERM;
67566+ goto out_file;
67567+ }
67568+
67569 sched_exec();
67570
67571 bprm->file = file;
67572 bprm->filename = filename;
67573 bprm->interp = filename;
67574
67575+ if (gr_process_user_ban()) {
67576+ retval = -EPERM;
67577+ goto out_file;
67578+ }
67579+
67580+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
67581+ retval = -EACCES;
67582+ goto out_file;
67583+ }
67584+
67585 retval = bprm_mm_init(bprm);
67586 if (retval)
67587 goto out_file;
67588@@ -1399,25 +1498,66 @@ int do_execve(char * filename,
67589 if (retval < 0)
67590 goto out;
67591
67592+#ifdef CONFIG_GRKERNSEC
67593+ old_acl = current->acl;
67594+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
67595+ old_exec_file = current->exec_file;
67596+ get_file(file);
67597+ current->exec_file = file;
67598+#endif
67599+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67600+ /* limit suid stack to 8MB
67601+ we saved the old limits above and will restore them if this exec fails
67602+ */
67603+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
67604+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
67605+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
67606+#endif
67607+
67608+ if (!gr_tpe_allow(file)) {
67609+ retval = -EACCES;
67610+ goto out_fail;
67611+ }
67612+
67613+ if (gr_check_crash_exec(file)) {
67614+ retval = -EACCES;
67615+ goto out_fail;
67616+ }
67617+
67618+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
67619+ bprm->unsafe);
67620+ if (retval < 0)
67621+ goto out_fail;
67622+
67623 retval = copy_strings_kernel(1, &bprm->filename, bprm);
67624 if (retval < 0)
67625- goto out;
67626+ goto out_fail;
67627
67628 bprm->exec = bprm->p;
67629 retval = copy_strings(bprm->envc, envp, bprm);
67630 if (retval < 0)
67631- goto out;
67632+ goto out_fail;
67633
67634 retval = copy_strings(bprm->argc, argv, bprm);
67635 if (retval < 0)
67636- goto out;
67637+ goto out_fail;
67638+
67639+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
67640+
67641+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
67642
67643 current->flags &= ~PF_KTHREAD;
67644 retval = search_binary_handler(bprm,regs);
67645 if (retval < 0)
67646- goto out;
67647+ goto out_fail;
67648+#ifdef CONFIG_GRKERNSEC
67649+ if (old_exec_file)
67650+ fput(old_exec_file);
67651+#endif
67652
67653 /* execve succeeded */
67654+
67655+ increment_exec_counter();
67656 current->fs->in_exec = 0;
67657 current->in_execve = 0;
67658 acct_update_integrals(current);
67659@@ -1426,6 +1566,14 @@ int do_execve(char * filename,
67660 put_files_struct(displaced);
67661 return retval;
67662
67663+out_fail:
67664+#ifdef CONFIG_GRKERNSEC
67665+ current->acl = old_acl;
67666+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
67667+ fput(current->exec_file);
67668+ current->exec_file = old_exec_file;
67669+#endif
67670+
67671 out:
67672 if (bprm->mm) {
67673 acct_arg_size(bprm, 0);
67674@@ -1591,6 +1739,229 @@ out:
67675 return ispipe;
67676 }
67677
67678+int pax_check_flags(unsigned long *flags)
67679+{
67680+ int retval = 0;
67681+
67682+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
67683+ if (*flags & MF_PAX_SEGMEXEC)
67684+ {
67685+ *flags &= ~MF_PAX_SEGMEXEC;
67686+ retval = -EINVAL;
67687+ }
67688+#endif
67689+
67690+ if ((*flags & MF_PAX_PAGEEXEC)
67691+
67692+#ifdef CONFIG_PAX_PAGEEXEC
67693+ && (*flags & MF_PAX_SEGMEXEC)
67694+#endif
67695+
67696+ )
67697+ {
67698+ *flags &= ~MF_PAX_PAGEEXEC;
67699+ retval = -EINVAL;
67700+ }
67701+
67702+ if ((*flags & MF_PAX_MPROTECT)
67703+
67704+#ifdef CONFIG_PAX_MPROTECT
67705+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67706+#endif
67707+
67708+ )
67709+ {
67710+ *flags &= ~MF_PAX_MPROTECT;
67711+ retval = -EINVAL;
67712+ }
67713+
67714+ if ((*flags & MF_PAX_EMUTRAMP)
67715+
67716+#ifdef CONFIG_PAX_EMUTRAMP
67717+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67718+#endif
67719+
67720+ )
67721+ {
67722+ *flags &= ~MF_PAX_EMUTRAMP;
67723+ retval = -EINVAL;
67724+ }
67725+
67726+ return retval;
67727+}
67728+
67729+EXPORT_SYMBOL(pax_check_flags);
67730+
67731+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67732+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
67733+{
67734+ struct task_struct *tsk = current;
67735+ struct mm_struct *mm = current->mm;
67736+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
67737+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
67738+ char *path_exec = NULL;
67739+ char *path_fault = NULL;
67740+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
67741+
67742+ if (buffer_exec && buffer_fault) {
67743+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
67744+
67745+ down_read(&mm->mmap_sem);
67746+ vma = mm->mmap;
67747+ while (vma && (!vma_exec || !vma_fault)) {
67748+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
67749+ vma_exec = vma;
67750+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
67751+ vma_fault = vma;
67752+ vma = vma->vm_next;
67753+ }
67754+ if (vma_exec) {
67755+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
67756+ if (IS_ERR(path_exec))
67757+ path_exec = "<path too long>";
67758+ else {
67759+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
67760+ if (path_exec) {
67761+ *path_exec = 0;
67762+ path_exec = buffer_exec;
67763+ } else
67764+ path_exec = "<path too long>";
67765+ }
67766+ }
67767+ if (vma_fault) {
67768+ start = vma_fault->vm_start;
67769+ end = vma_fault->vm_end;
67770+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
67771+ if (vma_fault->vm_file) {
67772+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
67773+ if (IS_ERR(path_fault))
67774+ path_fault = "<path too long>";
67775+ else {
67776+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
67777+ if (path_fault) {
67778+ *path_fault = 0;
67779+ path_fault = buffer_fault;
67780+ } else
67781+ path_fault = "<path too long>";
67782+ }
67783+ } else
67784+ path_fault = "<anonymous mapping>";
67785+ }
67786+ up_read(&mm->mmap_sem);
67787+ }
67788+ if (tsk->signal->curr_ip)
67789+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
67790+ else
67791+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
67792+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
67793+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
67794+ task_uid(tsk), task_euid(tsk), pc, sp);
67795+ free_page((unsigned long)buffer_exec);
67796+ free_page((unsigned long)buffer_fault);
67797+ pax_report_insns(regs, pc, sp);
67798+ do_coredump(SIGKILL, SIGKILL, regs);
67799+}
67800+#endif
67801+
67802+#ifdef CONFIG_PAX_REFCOUNT
67803+void pax_report_refcount_overflow(struct pt_regs *regs)
67804+{
67805+ if (current->signal->curr_ip)
67806+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67807+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
67808+ else
67809+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67810+ current->comm, task_pid_nr(current), current_uid(), current_euid());
67811+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
67812+ show_regs(regs);
67813+ force_sig_specific(SIGKILL, current);
67814+}
67815+#endif
67816+
67817+#ifdef CONFIG_PAX_USERCOPY
67818+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
67819+int object_is_on_stack(const void *obj, unsigned long len)
67820+{
67821+ const void * const stack = task_stack_page(current);
67822+ const void * const stackend = stack + THREAD_SIZE;
67823+
67824+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67825+ const void *frame = NULL;
67826+ const void *oldframe;
67827+#endif
67828+
67829+ if (obj + len < obj)
67830+ return -1;
67831+
67832+ if (obj + len <= stack || stackend <= obj)
67833+ return 0;
67834+
67835+ if (obj < stack || stackend < obj + len)
67836+ return -1;
67837+
67838+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67839+ oldframe = __builtin_frame_address(1);
67840+ if (oldframe)
67841+ frame = __builtin_frame_address(2);
67842+ /*
67843+ low ----------------------------------------------> high
67844+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
67845+ ^----------------^
67846+ allow copies only within here
67847+ */
67848+ while (stack <= frame && frame < stackend) {
67849+ /* if obj + len extends past the last frame, this
67850+ check won't pass and the next frame will be 0,
67851+ causing us to bail out and correctly report
67852+ the copy as invalid
67853+ */
67854+ if (obj + len <= frame)
67855+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
67856+ oldframe = frame;
67857+ frame = *(const void * const *)frame;
67858+ }
67859+ return -1;
67860+#else
67861+ return 1;
67862+#endif
67863+}
67864+
67865+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
67866+{
67867+ if (current->signal->curr_ip)
67868+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67869+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67870+ else
67871+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67872+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67873+
67874+ dump_stack();
67875+ gr_handle_kernel_exploit();
67876+ do_group_exit(SIGKILL);
67877+}
67878+#endif
67879+
67880+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
67881+void pax_track_stack(void)
67882+{
67883+ unsigned long sp = (unsigned long)&sp;
67884+ if (sp < current_thread_info()->lowest_stack &&
67885+ sp > (unsigned long)task_stack_page(current))
67886+ current_thread_info()->lowest_stack = sp;
67887+}
67888+EXPORT_SYMBOL(pax_track_stack);
67889+#endif
67890+
67891+#ifdef CONFIG_PAX_SIZE_OVERFLOW
67892+void report_size_overflow(const char *file, unsigned int line, const char *func)
67893+{
67894+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
67895+ dump_stack();
67896+ do_group_exit(SIGKILL);
67897+}
67898+EXPORT_SYMBOL(report_size_overflow);
67899+#endif
67900+
67901 static int zap_process(struct task_struct *start)
67902 {
67903 struct task_struct *t;
67904@@ -1793,17 +2164,17 @@ static void wait_for_dump_helpers(struct file *file)
67905 pipe = file->f_path.dentry->d_inode->i_pipe;
67906
67907 pipe_lock(pipe);
67908- pipe->readers++;
67909- pipe->writers--;
67910+ atomic_inc(&pipe->readers);
67911+ atomic_dec(&pipe->writers);
67912
67913- while ((pipe->readers > 1) && (!signal_pending(current))) {
67914+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
67915 wake_up_interruptible_sync(&pipe->wait);
67916 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
67917 pipe_wait(pipe);
67918 }
67919
67920- pipe->readers--;
67921- pipe->writers++;
67922+ atomic_dec(&pipe->readers);
67923+ atomic_inc(&pipe->writers);
67924 pipe_unlock(pipe);
67925
67926 }
67927@@ -1826,10 +2197,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67928 char **helper_argv = NULL;
67929 int helper_argc = 0;
67930 int dump_count = 0;
67931- static atomic_t core_dump_count = ATOMIC_INIT(0);
67932+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
67933
67934 audit_core_dumps(signr);
67935
67936+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
67937+ gr_handle_brute_attach(current, mm->flags);
67938+
67939 binfmt = mm->binfmt;
67940 if (!binfmt || !binfmt->core_dump)
67941 goto fail;
67942@@ -1874,6 +2248,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67943 */
67944 clear_thread_flag(TIF_SIGPENDING);
67945
67946+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
67947+
67948 /*
67949 * lock_kernel() because format_corename() is controlled by sysctl, which
67950 * uses lock_kernel()
67951@@ -1908,7 +2284,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67952 goto fail_unlock;
67953 }
67954
67955- dump_count = atomic_inc_return(&core_dump_count);
67956+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
67957 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
67958 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
67959 task_tgid_vnr(current), current->comm);
67960@@ -1972,7 +2348,7 @@ close_fail:
67961 filp_close(file, NULL);
67962 fail_dropcount:
67963 if (dump_count)
67964- atomic_dec(&core_dump_count);
67965+ atomic_dec_unchecked(&core_dump_count);
67966 fail_unlock:
67967 if (helper_argv)
67968 argv_free(helper_argv);
67969diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
67970index 7f8d2e5..a1abdbb 100644
67971--- a/fs/ext2/balloc.c
67972+++ b/fs/ext2/balloc.c
67973@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
67974
67975 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67976 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67977- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67978+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67979 sbi->s_resuid != current_fsuid() &&
67980 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67981 return 0;
67982diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
67983index 27967f9..9f2a5fb 100644
67984--- a/fs/ext3/balloc.c
67985+++ b/fs/ext3/balloc.c
67986@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
67987
67988 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67989 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67990- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67991+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67992 sbi->s_resuid != current_fsuid() &&
67993 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67994 return 0;
67995diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
67996index e85b63c..80398e6 100644
67997--- a/fs/ext4/balloc.c
67998+++ b/fs/ext4/balloc.c
67999@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
68000 /* Hm, nope. Are (enough) root reserved blocks available? */
68001 if (sbi->s_resuid == current_fsuid() ||
68002 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
68003- capable(CAP_SYS_RESOURCE)) {
68004+ capable_nolog(CAP_SYS_RESOURCE)) {
68005 if (free_blocks >= (nblocks + dirty_blocks))
68006 return 1;
68007 }
68008diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
68009index 67c46ed..1f237e5 100644
68010--- a/fs/ext4/ext4.h
68011+++ b/fs/ext4/ext4.h
68012@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
68013
68014 /* stats for buddy allocator */
68015 spinlock_t s_mb_pa_lock;
68016- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
68017- atomic_t s_bal_success; /* we found long enough chunks */
68018- atomic_t s_bal_allocated; /* in blocks */
68019- atomic_t s_bal_ex_scanned; /* total extents scanned */
68020- atomic_t s_bal_goals; /* goal hits */
68021- atomic_t s_bal_breaks; /* too long searches */
68022- atomic_t s_bal_2orders; /* 2^order hits */
68023+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
68024+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
68025+ atomic_unchecked_t s_bal_allocated; /* in blocks */
68026+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
68027+ atomic_unchecked_t s_bal_goals; /* goal hits */
68028+ atomic_unchecked_t s_bal_breaks; /* too long searches */
68029+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
68030 spinlock_t s_bal_lock;
68031 unsigned long s_mb_buddies_generated;
68032 unsigned long long s_mb_generation_time;
68033- atomic_t s_mb_lost_chunks;
68034- atomic_t s_mb_preallocated;
68035- atomic_t s_mb_discarded;
68036+ atomic_unchecked_t s_mb_lost_chunks;
68037+ atomic_unchecked_t s_mb_preallocated;
68038+ atomic_unchecked_t s_mb_discarded;
68039 atomic_t s_lock_busy;
68040
68041 /* locality groups */
68042diff --git a/fs/ext4/file.c b/fs/ext4/file.c
68043index 2a60541..7439d61 100644
68044--- a/fs/ext4/file.c
68045+++ b/fs/ext4/file.c
68046@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
68047 cp = d_path(&path, buf, sizeof(buf));
68048 path_put(&path);
68049 if (!IS_ERR(cp)) {
68050- memcpy(sbi->s_es->s_last_mounted, cp,
68051- sizeof(sbi->s_es->s_last_mounted));
68052+ strlcpy(sbi->s_es->s_last_mounted, cp,
68053+ sizeof(sbi->s_es->s_last_mounted));
68054 sb->s_dirt = 1;
68055 }
68056 }
68057diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
68058index 42bac1b..0aab9d8 100644
68059--- a/fs/ext4/mballoc.c
68060+++ b/fs/ext4/mballoc.c
68061@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
68062 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
68063
68064 if (EXT4_SB(sb)->s_mb_stats)
68065- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
68066+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
68067
68068 break;
68069 }
68070@@ -2131,7 +2131,7 @@ repeat:
68071 ac->ac_status = AC_STATUS_CONTINUE;
68072 ac->ac_flags |= EXT4_MB_HINT_FIRST;
68073 cr = 3;
68074- atomic_inc(&sbi->s_mb_lost_chunks);
68075+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
68076 goto repeat;
68077 }
68078 }
68079@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
68080 ext4_grpblk_t counters[16];
68081 } sg;
68082
68083+ pax_track_stack();
68084+
68085 group--;
68086 if (group == 0)
68087 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
68088@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
68089 if (sbi->s_mb_stats) {
68090 printk(KERN_INFO
68091 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
68092- atomic_read(&sbi->s_bal_allocated),
68093- atomic_read(&sbi->s_bal_reqs),
68094- atomic_read(&sbi->s_bal_success));
68095+ atomic_read_unchecked(&sbi->s_bal_allocated),
68096+ atomic_read_unchecked(&sbi->s_bal_reqs),
68097+ atomic_read_unchecked(&sbi->s_bal_success));
68098 printk(KERN_INFO
68099 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
68100 "%u 2^N hits, %u breaks, %u lost\n",
68101- atomic_read(&sbi->s_bal_ex_scanned),
68102- atomic_read(&sbi->s_bal_goals),
68103- atomic_read(&sbi->s_bal_2orders),
68104- atomic_read(&sbi->s_bal_breaks),
68105- atomic_read(&sbi->s_mb_lost_chunks));
68106+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
68107+ atomic_read_unchecked(&sbi->s_bal_goals),
68108+ atomic_read_unchecked(&sbi->s_bal_2orders),
68109+ atomic_read_unchecked(&sbi->s_bal_breaks),
68110+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
68111 printk(KERN_INFO
68112 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
68113 sbi->s_mb_buddies_generated++,
68114 sbi->s_mb_generation_time);
68115 printk(KERN_INFO
68116 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
68117- atomic_read(&sbi->s_mb_preallocated),
68118- atomic_read(&sbi->s_mb_discarded));
68119+ atomic_read_unchecked(&sbi->s_mb_preallocated),
68120+ atomic_read_unchecked(&sbi->s_mb_discarded));
68121 }
68122
68123 free_percpu(sbi->s_locality_groups);
68124@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
68125 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
68126
68127 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
68128- atomic_inc(&sbi->s_bal_reqs);
68129- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
68130+ atomic_inc_unchecked(&sbi->s_bal_reqs);
68131+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
68132 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
68133- atomic_inc(&sbi->s_bal_success);
68134- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
68135+ atomic_inc_unchecked(&sbi->s_bal_success);
68136+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
68137 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
68138 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
68139- atomic_inc(&sbi->s_bal_goals);
68140+ atomic_inc_unchecked(&sbi->s_bal_goals);
68141 if (ac->ac_found > sbi->s_mb_max_to_scan)
68142- atomic_inc(&sbi->s_bal_breaks);
68143+ atomic_inc_unchecked(&sbi->s_bal_breaks);
68144 }
68145
68146 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
68147@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
68148 trace_ext4_mb_new_inode_pa(ac, pa);
68149
68150 ext4_mb_use_inode_pa(ac, pa);
68151- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68152+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68153
68154 ei = EXT4_I(ac->ac_inode);
68155 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
68156@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
68157 trace_ext4_mb_new_group_pa(ac, pa);
68158
68159 ext4_mb_use_group_pa(ac, pa);
68160- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68161+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68162
68163 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
68164 lg = ac->ac_lg;
68165@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
68166 * from the bitmap and continue.
68167 */
68168 }
68169- atomic_add(free, &sbi->s_mb_discarded);
68170+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
68171
68172 return err;
68173 }
68174@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
68175 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
68176 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
68177 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
68178- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68179+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68180
68181 if (ac) {
68182 ac->ac_sb = sb;
68183diff --git a/fs/ext4/super.c b/fs/ext4/super.c
68184index f1e7077..edd86b2 100644
68185--- a/fs/ext4/super.c
68186+++ b/fs/ext4/super.c
68187@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
68188 }
68189
68190
68191-static struct sysfs_ops ext4_attr_ops = {
68192+static const struct sysfs_ops ext4_attr_ops = {
68193 .show = ext4_attr_show,
68194 .store = ext4_attr_store,
68195 };
68196diff --git a/fs/fcntl.c b/fs/fcntl.c
68197index 97e01dc..e9aab2d 100644
68198--- a/fs/fcntl.c
68199+++ b/fs/fcntl.c
68200@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
68201 if (err)
68202 return err;
68203
68204+ if (gr_handle_chroot_fowner(pid, type))
68205+ return -ENOENT;
68206+ if (gr_check_protected_task_fowner(pid, type))
68207+ return -EACCES;
68208+
68209 f_modown(filp, pid, type, force);
68210 return 0;
68211 }
68212@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
68213
68214 static int f_setown_ex(struct file *filp, unsigned long arg)
68215 {
68216- struct f_owner_ex * __user owner_p = (void * __user)arg;
68217+ struct f_owner_ex __user *owner_p = (void __user *)arg;
68218 struct f_owner_ex owner;
68219 struct pid *pid;
68220 int type;
68221@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
68222
68223 static int f_getown_ex(struct file *filp, unsigned long arg)
68224 {
68225- struct f_owner_ex * __user owner_p = (void * __user)arg;
68226+ struct f_owner_ex __user *owner_p = (void __user *)arg;
68227 struct f_owner_ex owner;
68228 int ret = 0;
68229
68230@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
68231 switch (cmd) {
68232 case F_DUPFD:
68233 case F_DUPFD_CLOEXEC:
68234+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
68235 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68236 break;
68237 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
68238diff --git a/fs/fifo.c b/fs/fifo.c
68239index f8f97b8..b1f2259 100644
68240--- a/fs/fifo.c
68241+++ b/fs/fifo.c
68242@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
68243 */
68244 filp->f_op = &read_pipefifo_fops;
68245 pipe->r_counter++;
68246- if (pipe->readers++ == 0)
68247+ if (atomic_inc_return(&pipe->readers) == 1)
68248 wake_up_partner(inode);
68249
68250- if (!pipe->writers) {
68251+ if (!atomic_read(&pipe->writers)) {
68252 if ((filp->f_flags & O_NONBLOCK)) {
68253 /* suppress POLLHUP until we have
68254 * seen a writer */
68255@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
68256 * errno=ENXIO when there is no process reading the FIFO.
68257 */
68258 ret = -ENXIO;
68259- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
68260+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
68261 goto err;
68262
68263 filp->f_op = &write_pipefifo_fops;
68264 pipe->w_counter++;
68265- if (!pipe->writers++)
68266+ if (atomic_inc_return(&pipe->writers) == 1)
68267 wake_up_partner(inode);
68268
68269- if (!pipe->readers) {
68270+ if (!atomic_read(&pipe->readers)) {
68271 wait_for_partner(inode, &pipe->r_counter);
68272 if (signal_pending(current))
68273 goto err_wr;
68274@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
68275 */
68276 filp->f_op = &rdwr_pipefifo_fops;
68277
68278- pipe->readers++;
68279- pipe->writers++;
68280+ atomic_inc(&pipe->readers);
68281+ atomic_inc(&pipe->writers);
68282 pipe->r_counter++;
68283 pipe->w_counter++;
68284- if (pipe->readers == 1 || pipe->writers == 1)
68285+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
68286 wake_up_partner(inode);
68287 break;
68288
68289@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
68290 return 0;
68291
68292 err_rd:
68293- if (!--pipe->readers)
68294+ if (atomic_dec_and_test(&pipe->readers))
68295 wake_up_interruptible(&pipe->wait);
68296 ret = -ERESTARTSYS;
68297 goto err;
68298
68299 err_wr:
68300- if (!--pipe->writers)
68301+ if (atomic_dec_and_test(&pipe->writers))
68302 wake_up_interruptible(&pipe->wait);
68303 ret = -ERESTARTSYS;
68304 goto err;
68305
68306 err:
68307- if (!pipe->readers && !pipe->writers)
68308+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
68309 free_pipe_info(inode);
68310
68311 err_nocleanup:
68312diff --git a/fs/file.c b/fs/file.c
68313index 87e1290..a930cc4 100644
68314--- a/fs/file.c
68315+++ b/fs/file.c
68316@@ -14,6 +14,7 @@
68317 #include <linux/slab.h>
68318 #include <linux/vmalloc.h>
68319 #include <linux/file.h>
68320+#include <linux/security.h>
68321 #include <linux/fdtable.h>
68322 #include <linux/bitops.h>
68323 #include <linux/interrupt.h>
68324@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
68325 * N.B. For clone tasks sharing a files structure, this test
68326 * will limit the total number of files that can be opened.
68327 */
68328+
68329+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
68330 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68331 return -EMFILE;
68332
68333diff --git a/fs/filesystems.c b/fs/filesystems.c
68334index a24c58e..53f91ee 100644
68335--- a/fs/filesystems.c
68336+++ b/fs/filesystems.c
68337@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
68338 int len = dot ? dot - name : strlen(name);
68339
68340 fs = __get_fs_type(name, len);
68341+
68342+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68343+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
68344+#else
68345 if (!fs && (request_module("%.*s", len, name) == 0))
68346+#endif
68347 fs = __get_fs_type(name, len);
68348
68349 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
68350diff --git a/fs/fs_struct.c b/fs/fs_struct.c
68351index eee0590..1181166 100644
68352--- a/fs/fs_struct.c
68353+++ b/fs/fs_struct.c
68354@@ -4,6 +4,7 @@
68355 #include <linux/path.h>
68356 #include <linux/slab.h>
68357 #include <linux/fs_struct.h>
68358+#include <linux/grsecurity.h>
68359
68360 /*
68361 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
68362@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
68363 old_root = fs->root;
68364 fs->root = *path;
68365 path_get(path);
68366+ gr_set_chroot_entries(current, path);
68367 write_unlock(&fs->lock);
68368 if (old_root.dentry)
68369 path_put(&old_root);
68370@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
68371 && fs->root.mnt == old_root->mnt) {
68372 path_get(new_root);
68373 fs->root = *new_root;
68374+ gr_set_chroot_entries(p, new_root);
68375 count++;
68376 }
68377 if (fs->pwd.dentry == old_root->dentry
68378@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
68379 task_lock(tsk);
68380 write_lock(&fs->lock);
68381 tsk->fs = NULL;
68382- kill = !--fs->users;
68383+ gr_clear_chroot_entries(tsk);
68384+ kill = !atomic_dec_return(&fs->users);
68385 write_unlock(&fs->lock);
68386 task_unlock(tsk);
68387 if (kill)
68388@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
68389 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
68390 /* We don't need to lock fs - think why ;-) */
68391 if (fs) {
68392- fs->users = 1;
68393+ atomic_set(&fs->users, 1);
68394 fs->in_exec = 0;
68395 rwlock_init(&fs->lock);
68396 fs->umask = old->umask;
68397@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
68398
68399 task_lock(current);
68400 write_lock(&fs->lock);
68401- kill = !--fs->users;
68402+ kill = !atomic_dec_return(&fs->users);
68403 current->fs = new_fs;
68404+ gr_set_chroot_entries(current, &new_fs->root);
68405 write_unlock(&fs->lock);
68406 task_unlock(current);
68407
68408@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
68409
68410 int current_umask(void)
68411 {
68412- return current->fs->umask;
68413+ return current->fs->umask | gr_acl_umask();
68414 }
68415 EXPORT_SYMBOL(current_umask);
68416
68417 /* to be mentioned only in INIT_TASK */
68418 struct fs_struct init_fs = {
68419- .users = 1,
68420+ .users = ATOMIC_INIT(1),
68421 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
68422 .umask = 0022,
68423 };
68424@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
68425 task_lock(current);
68426
68427 write_lock(&init_fs.lock);
68428- init_fs.users++;
68429+ atomic_inc(&init_fs.users);
68430 write_unlock(&init_fs.lock);
68431
68432 write_lock(&fs->lock);
68433 current->fs = &init_fs;
68434- kill = !--fs->users;
68435+ gr_set_chroot_entries(current, &current->fs->root);
68436+ kill = !atomic_dec_return(&fs->users);
68437 write_unlock(&fs->lock);
68438
68439 task_unlock(current);
68440diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
68441index 9905350..02eaec4 100644
68442--- a/fs/fscache/cookie.c
68443+++ b/fs/fscache/cookie.c
68444@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
68445 parent ? (char *) parent->def->name : "<no-parent>",
68446 def->name, netfs_data);
68447
68448- fscache_stat(&fscache_n_acquires);
68449+ fscache_stat_unchecked(&fscache_n_acquires);
68450
68451 /* if there's no parent cookie, then we don't create one here either */
68452 if (!parent) {
68453- fscache_stat(&fscache_n_acquires_null);
68454+ fscache_stat_unchecked(&fscache_n_acquires_null);
68455 _leave(" [no parent]");
68456 return NULL;
68457 }
68458@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
68459 /* allocate and initialise a cookie */
68460 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
68461 if (!cookie) {
68462- fscache_stat(&fscache_n_acquires_oom);
68463+ fscache_stat_unchecked(&fscache_n_acquires_oom);
68464 _leave(" [ENOMEM]");
68465 return NULL;
68466 }
68467@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68468
68469 switch (cookie->def->type) {
68470 case FSCACHE_COOKIE_TYPE_INDEX:
68471- fscache_stat(&fscache_n_cookie_index);
68472+ fscache_stat_unchecked(&fscache_n_cookie_index);
68473 break;
68474 case FSCACHE_COOKIE_TYPE_DATAFILE:
68475- fscache_stat(&fscache_n_cookie_data);
68476+ fscache_stat_unchecked(&fscache_n_cookie_data);
68477 break;
68478 default:
68479- fscache_stat(&fscache_n_cookie_special);
68480+ fscache_stat_unchecked(&fscache_n_cookie_special);
68481 break;
68482 }
68483
68484@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68485 if (fscache_acquire_non_index_cookie(cookie) < 0) {
68486 atomic_dec(&parent->n_children);
68487 __fscache_cookie_put(cookie);
68488- fscache_stat(&fscache_n_acquires_nobufs);
68489+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
68490 _leave(" = NULL");
68491 return NULL;
68492 }
68493 }
68494
68495- fscache_stat(&fscache_n_acquires_ok);
68496+ fscache_stat_unchecked(&fscache_n_acquires_ok);
68497 _leave(" = %p", cookie);
68498 return cookie;
68499 }
68500@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
68501 cache = fscache_select_cache_for_object(cookie->parent);
68502 if (!cache) {
68503 up_read(&fscache_addremove_sem);
68504- fscache_stat(&fscache_n_acquires_no_cache);
68505+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
68506 _leave(" = -ENOMEDIUM [no cache]");
68507 return -ENOMEDIUM;
68508 }
68509@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
68510 object = cache->ops->alloc_object(cache, cookie);
68511 fscache_stat_d(&fscache_n_cop_alloc_object);
68512 if (IS_ERR(object)) {
68513- fscache_stat(&fscache_n_object_no_alloc);
68514+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
68515 ret = PTR_ERR(object);
68516 goto error;
68517 }
68518
68519- fscache_stat(&fscache_n_object_alloc);
68520+ fscache_stat_unchecked(&fscache_n_object_alloc);
68521
68522 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
68523
68524@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
68525 struct fscache_object *object;
68526 struct hlist_node *_p;
68527
68528- fscache_stat(&fscache_n_updates);
68529+ fscache_stat_unchecked(&fscache_n_updates);
68530
68531 if (!cookie) {
68532- fscache_stat(&fscache_n_updates_null);
68533+ fscache_stat_unchecked(&fscache_n_updates_null);
68534 _leave(" [no cookie]");
68535 return;
68536 }
68537@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68538 struct fscache_object *object;
68539 unsigned long event;
68540
68541- fscache_stat(&fscache_n_relinquishes);
68542+ fscache_stat_unchecked(&fscache_n_relinquishes);
68543 if (retire)
68544- fscache_stat(&fscache_n_relinquishes_retire);
68545+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
68546
68547 if (!cookie) {
68548- fscache_stat(&fscache_n_relinquishes_null);
68549+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
68550 _leave(" [no cookie]");
68551 return;
68552 }
68553@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68554
68555 /* wait for the cookie to finish being instantiated (or to fail) */
68556 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
68557- fscache_stat(&fscache_n_relinquishes_waitcrt);
68558+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
68559 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
68560 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
68561 }
68562diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
68563index edd7434..0725e66 100644
68564--- a/fs/fscache/internal.h
68565+++ b/fs/fscache/internal.h
68566@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
68567 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
68568 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
68569
68570-extern atomic_t fscache_n_op_pend;
68571-extern atomic_t fscache_n_op_run;
68572-extern atomic_t fscache_n_op_enqueue;
68573-extern atomic_t fscache_n_op_deferred_release;
68574-extern atomic_t fscache_n_op_release;
68575-extern atomic_t fscache_n_op_gc;
68576-extern atomic_t fscache_n_op_cancelled;
68577-extern atomic_t fscache_n_op_rejected;
68578+extern atomic_unchecked_t fscache_n_op_pend;
68579+extern atomic_unchecked_t fscache_n_op_run;
68580+extern atomic_unchecked_t fscache_n_op_enqueue;
68581+extern atomic_unchecked_t fscache_n_op_deferred_release;
68582+extern atomic_unchecked_t fscache_n_op_release;
68583+extern atomic_unchecked_t fscache_n_op_gc;
68584+extern atomic_unchecked_t fscache_n_op_cancelled;
68585+extern atomic_unchecked_t fscache_n_op_rejected;
68586
68587-extern atomic_t fscache_n_attr_changed;
68588-extern atomic_t fscache_n_attr_changed_ok;
68589-extern atomic_t fscache_n_attr_changed_nobufs;
68590-extern atomic_t fscache_n_attr_changed_nomem;
68591-extern atomic_t fscache_n_attr_changed_calls;
68592+extern atomic_unchecked_t fscache_n_attr_changed;
68593+extern atomic_unchecked_t fscache_n_attr_changed_ok;
68594+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
68595+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
68596+extern atomic_unchecked_t fscache_n_attr_changed_calls;
68597
68598-extern atomic_t fscache_n_allocs;
68599-extern atomic_t fscache_n_allocs_ok;
68600-extern atomic_t fscache_n_allocs_wait;
68601-extern atomic_t fscache_n_allocs_nobufs;
68602-extern atomic_t fscache_n_allocs_intr;
68603-extern atomic_t fscache_n_allocs_object_dead;
68604-extern atomic_t fscache_n_alloc_ops;
68605-extern atomic_t fscache_n_alloc_op_waits;
68606+extern atomic_unchecked_t fscache_n_allocs;
68607+extern atomic_unchecked_t fscache_n_allocs_ok;
68608+extern atomic_unchecked_t fscache_n_allocs_wait;
68609+extern atomic_unchecked_t fscache_n_allocs_nobufs;
68610+extern atomic_unchecked_t fscache_n_allocs_intr;
68611+extern atomic_unchecked_t fscache_n_allocs_object_dead;
68612+extern atomic_unchecked_t fscache_n_alloc_ops;
68613+extern atomic_unchecked_t fscache_n_alloc_op_waits;
68614
68615-extern atomic_t fscache_n_retrievals;
68616-extern atomic_t fscache_n_retrievals_ok;
68617-extern atomic_t fscache_n_retrievals_wait;
68618-extern atomic_t fscache_n_retrievals_nodata;
68619-extern atomic_t fscache_n_retrievals_nobufs;
68620-extern atomic_t fscache_n_retrievals_intr;
68621-extern atomic_t fscache_n_retrievals_nomem;
68622-extern atomic_t fscache_n_retrievals_object_dead;
68623-extern atomic_t fscache_n_retrieval_ops;
68624-extern atomic_t fscache_n_retrieval_op_waits;
68625+extern atomic_unchecked_t fscache_n_retrievals;
68626+extern atomic_unchecked_t fscache_n_retrievals_ok;
68627+extern atomic_unchecked_t fscache_n_retrievals_wait;
68628+extern atomic_unchecked_t fscache_n_retrievals_nodata;
68629+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
68630+extern atomic_unchecked_t fscache_n_retrievals_intr;
68631+extern atomic_unchecked_t fscache_n_retrievals_nomem;
68632+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
68633+extern atomic_unchecked_t fscache_n_retrieval_ops;
68634+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
68635
68636-extern atomic_t fscache_n_stores;
68637-extern atomic_t fscache_n_stores_ok;
68638-extern atomic_t fscache_n_stores_again;
68639-extern atomic_t fscache_n_stores_nobufs;
68640-extern atomic_t fscache_n_stores_oom;
68641-extern atomic_t fscache_n_store_ops;
68642-extern atomic_t fscache_n_store_calls;
68643-extern atomic_t fscache_n_store_pages;
68644-extern atomic_t fscache_n_store_radix_deletes;
68645-extern atomic_t fscache_n_store_pages_over_limit;
68646+extern atomic_unchecked_t fscache_n_stores;
68647+extern atomic_unchecked_t fscache_n_stores_ok;
68648+extern atomic_unchecked_t fscache_n_stores_again;
68649+extern atomic_unchecked_t fscache_n_stores_nobufs;
68650+extern atomic_unchecked_t fscache_n_stores_oom;
68651+extern atomic_unchecked_t fscache_n_store_ops;
68652+extern atomic_unchecked_t fscache_n_store_calls;
68653+extern atomic_unchecked_t fscache_n_store_pages;
68654+extern atomic_unchecked_t fscache_n_store_radix_deletes;
68655+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
68656
68657-extern atomic_t fscache_n_store_vmscan_not_storing;
68658-extern atomic_t fscache_n_store_vmscan_gone;
68659-extern atomic_t fscache_n_store_vmscan_busy;
68660-extern atomic_t fscache_n_store_vmscan_cancelled;
68661+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
68662+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
68663+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
68664+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
68665
68666-extern atomic_t fscache_n_marks;
68667-extern atomic_t fscache_n_uncaches;
68668+extern atomic_unchecked_t fscache_n_marks;
68669+extern atomic_unchecked_t fscache_n_uncaches;
68670
68671-extern atomic_t fscache_n_acquires;
68672-extern atomic_t fscache_n_acquires_null;
68673-extern atomic_t fscache_n_acquires_no_cache;
68674-extern atomic_t fscache_n_acquires_ok;
68675-extern atomic_t fscache_n_acquires_nobufs;
68676-extern atomic_t fscache_n_acquires_oom;
68677+extern atomic_unchecked_t fscache_n_acquires;
68678+extern atomic_unchecked_t fscache_n_acquires_null;
68679+extern atomic_unchecked_t fscache_n_acquires_no_cache;
68680+extern atomic_unchecked_t fscache_n_acquires_ok;
68681+extern atomic_unchecked_t fscache_n_acquires_nobufs;
68682+extern atomic_unchecked_t fscache_n_acquires_oom;
68683
68684-extern atomic_t fscache_n_updates;
68685-extern atomic_t fscache_n_updates_null;
68686-extern atomic_t fscache_n_updates_run;
68687+extern atomic_unchecked_t fscache_n_updates;
68688+extern atomic_unchecked_t fscache_n_updates_null;
68689+extern atomic_unchecked_t fscache_n_updates_run;
68690
68691-extern atomic_t fscache_n_relinquishes;
68692-extern atomic_t fscache_n_relinquishes_null;
68693-extern atomic_t fscache_n_relinquishes_waitcrt;
68694-extern atomic_t fscache_n_relinquishes_retire;
68695+extern atomic_unchecked_t fscache_n_relinquishes;
68696+extern atomic_unchecked_t fscache_n_relinquishes_null;
68697+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
68698+extern atomic_unchecked_t fscache_n_relinquishes_retire;
68699
68700-extern atomic_t fscache_n_cookie_index;
68701-extern atomic_t fscache_n_cookie_data;
68702-extern atomic_t fscache_n_cookie_special;
68703+extern atomic_unchecked_t fscache_n_cookie_index;
68704+extern atomic_unchecked_t fscache_n_cookie_data;
68705+extern atomic_unchecked_t fscache_n_cookie_special;
68706
68707-extern atomic_t fscache_n_object_alloc;
68708-extern atomic_t fscache_n_object_no_alloc;
68709-extern atomic_t fscache_n_object_lookups;
68710-extern atomic_t fscache_n_object_lookups_negative;
68711-extern atomic_t fscache_n_object_lookups_positive;
68712-extern atomic_t fscache_n_object_lookups_timed_out;
68713-extern atomic_t fscache_n_object_created;
68714-extern atomic_t fscache_n_object_avail;
68715-extern atomic_t fscache_n_object_dead;
68716+extern atomic_unchecked_t fscache_n_object_alloc;
68717+extern atomic_unchecked_t fscache_n_object_no_alloc;
68718+extern atomic_unchecked_t fscache_n_object_lookups;
68719+extern atomic_unchecked_t fscache_n_object_lookups_negative;
68720+extern atomic_unchecked_t fscache_n_object_lookups_positive;
68721+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
68722+extern atomic_unchecked_t fscache_n_object_created;
68723+extern atomic_unchecked_t fscache_n_object_avail;
68724+extern atomic_unchecked_t fscache_n_object_dead;
68725
68726-extern atomic_t fscache_n_checkaux_none;
68727-extern atomic_t fscache_n_checkaux_okay;
68728-extern atomic_t fscache_n_checkaux_update;
68729-extern atomic_t fscache_n_checkaux_obsolete;
68730+extern atomic_unchecked_t fscache_n_checkaux_none;
68731+extern atomic_unchecked_t fscache_n_checkaux_okay;
68732+extern atomic_unchecked_t fscache_n_checkaux_update;
68733+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
68734
68735 extern atomic_t fscache_n_cop_alloc_object;
68736 extern atomic_t fscache_n_cop_lookup_object;
68737@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
68738 atomic_inc(stat);
68739 }
68740
68741+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
68742+{
68743+ atomic_inc_unchecked(stat);
68744+}
68745+
68746 static inline void fscache_stat_d(atomic_t *stat)
68747 {
68748 atomic_dec(stat);
68749@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
68750
68751 #define __fscache_stat(stat) (NULL)
68752 #define fscache_stat(stat) do {} while (0)
68753+#define fscache_stat_unchecked(stat) do {} while (0)
68754 #define fscache_stat_d(stat) do {} while (0)
68755 #endif
68756
68757diff --git a/fs/fscache/object.c b/fs/fscache/object.c
68758index e513ac5..e888d34 100644
68759--- a/fs/fscache/object.c
68760+++ b/fs/fscache/object.c
68761@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68762 /* update the object metadata on disk */
68763 case FSCACHE_OBJECT_UPDATING:
68764 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
68765- fscache_stat(&fscache_n_updates_run);
68766+ fscache_stat_unchecked(&fscache_n_updates_run);
68767 fscache_stat(&fscache_n_cop_update_object);
68768 object->cache->ops->update_object(object);
68769 fscache_stat_d(&fscache_n_cop_update_object);
68770@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68771 spin_lock(&object->lock);
68772 object->state = FSCACHE_OBJECT_DEAD;
68773 spin_unlock(&object->lock);
68774- fscache_stat(&fscache_n_object_dead);
68775+ fscache_stat_unchecked(&fscache_n_object_dead);
68776 goto terminal_transit;
68777
68778 /* handle the parent cache of this object being withdrawn from
68779@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68780 spin_lock(&object->lock);
68781 object->state = FSCACHE_OBJECT_DEAD;
68782 spin_unlock(&object->lock);
68783- fscache_stat(&fscache_n_object_dead);
68784+ fscache_stat_unchecked(&fscache_n_object_dead);
68785 goto terminal_transit;
68786
68787 /* complain about the object being woken up once it is
68788@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68789 parent->cookie->def->name, cookie->def->name,
68790 object->cache->tag->name);
68791
68792- fscache_stat(&fscache_n_object_lookups);
68793+ fscache_stat_unchecked(&fscache_n_object_lookups);
68794 fscache_stat(&fscache_n_cop_lookup_object);
68795 ret = object->cache->ops->lookup_object(object);
68796 fscache_stat_d(&fscache_n_cop_lookup_object);
68797@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68798 if (ret == -ETIMEDOUT) {
68799 /* probably stuck behind another object, so move this one to
68800 * the back of the queue */
68801- fscache_stat(&fscache_n_object_lookups_timed_out);
68802+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
68803 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68804 }
68805
68806@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
68807
68808 spin_lock(&object->lock);
68809 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68810- fscache_stat(&fscache_n_object_lookups_negative);
68811+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
68812
68813 /* transit here to allow write requests to begin stacking up
68814 * and read requests to begin returning ENODATA */
68815@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
68816 * result, in which case there may be data available */
68817 spin_lock(&object->lock);
68818 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68819- fscache_stat(&fscache_n_object_lookups_positive);
68820+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
68821
68822 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
68823
68824@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
68825 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68826 } else {
68827 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
68828- fscache_stat(&fscache_n_object_created);
68829+ fscache_stat_unchecked(&fscache_n_object_created);
68830
68831 object->state = FSCACHE_OBJECT_AVAILABLE;
68832 spin_unlock(&object->lock);
68833@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
68834 fscache_enqueue_dependents(object);
68835
68836 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
68837- fscache_stat(&fscache_n_object_avail);
68838+ fscache_stat_unchecked(&fscache_n_object_avail);
68839
68840 _leave("");
68841 }
68842@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68843 enum fscache_checkaux result;
68844
68845 if (!object->cookie->def->check_aux) {
68846- fscache_stat(&fscache_n_checkaux_none);
68847+ fscache_stat_unchecked(&fscache_n_checkaux_none);
68848 return FSCACHE_CHECKAUX_OKAY;
68849 }
68850
68851@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68852 switch (result) {
68853 /* entry okay as is */
68854 case FSCACHE_CHECKAUX_OKAY:
68855- fscache_stat(&fscache_n_checkaux_okay);
68856+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
68857 break;
68858
68859 /* entry requires update */
68860 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
68861- fscache_stat(&fscache_n_checkaux_update);
68862+ fscache_stat_unchecked(&fscache_n_checkaux_update);
68863 break;
68864
68865 /* entry requires deletion */
68866 case FSCACHE_CHECKAUX_OBSOLETE:
68867- fscache_stat(&fscache_n_checkaux_obsolete);
68868+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
68869 break;
68870
68871 default:
68872diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
68873index 313e79a..775240f 100644
68874--- a/fs/fscache/operation.c
68875+++ b/fs/fscache/operation.c
68876@@ -16,7 +16,7 @@
68877 #include <linux/seq_file.h>
68878 #include "internal.h"
68879
68880-atomic_t fscache_op_debug_id;
68881+atomic_unchecked_t fscache_op_debug_id;
68882 EXPORT_SYMBOL(fscache_op_debug_id);
68883
68884 /**
68885@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
68886 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
68887 ASSERTCMP(atomic_read(&op->usage), >, 0);
68888
68889- fscache_stat(&fscache_n_op_enqueue);
68890+ fscache_stat_unchecked(&fscache_n_op_enqueue);
68891 switch (op->flags & FSCACHE_OP_TYPE) {
68892 case FSCACHE_OP_FAST:
68893 _debug("queue fast");
68894@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
68895 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
68896 if (op->processor)
68897 fscache_enqueue_operation(op);
68898- fscache_stat(&fscache_n_op_run);
68899+ fscache_stat_unchecked(&fscache_n_op_run);
68900 }
68901
68902 /*
68903@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68904 if (object->n_ops > 0) {
68905 atomic_inc(&op->usage);
68906 list_add_tail(&op->pend_link, &object->pending_ops);
68907- fscache_stat(&fscache_n_op_pend);
68908+ fscache_stat_unchecked(&fscache_n_op_pend);
68909 } else if (!list_empty(&object->pending_ops)) {
68910 atomic_inc(&op->usage);
68911 list_add_tail(&op->pend_link, &object->pending_ops);
68912- fscache_stat(&fscache_n_op_pend);
68913+ fscache_stat_unchecked(&fscache_n_op_pend);
68914 fscache_start_operations(object);
68915 } else {
68916 ASSERTCMP(object->n_in_progress, ==, 0);
68917@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68918 object->n_exclusive++; /* reads and writes must wait */
68919 atomic_inc(&op->usage);
68920 list_add_tail(&op->pend_link, &object->pending_ops);
68921- fscache_stat(&fscache_n_op_pend);
68922+ fscache_stat_unchecked(&fscache_n_op_pend);
68923 ret = 0;
68924 } else {
68925 /* not allowed to submit ops in any other state */
68926@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
68927 if (object->n_exclusive > 0) {
68928 atomic_inc(&op->usage);
68929 list_add_tail(&op->pend_link, &object->pending_ops);
68930- fscache_stat(&fscache_n_op_pend);
68931+ fscache_stat_unchecked(&fscache_n_op_pend);
68932 } else if (!list_empty(&object->pending_ops)) {
68933 atomic_inc(&op->usage);
68934 list_add_tail(&op->pend_link, &object->pending_ops);
68935- fscache_stat(&fscache_n_op_pend);
68936+ fscache_stat_unchecked(&fscache_n_op_pend);
68937 fscache_start_operations(object);
68938 } else {
68939 ASSERTCMP(object->n_exclusive, ==, 0);
68940@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
68941 object->n_ops++;
68942 atomic_inc(&op->usage);
68943 list_add_tail(&op->pend_link, &object->pending_ops);
68944- fscache_stat(&fscache_n_op_pend);
68945+ fscache_stat_unchecked(&fscache_n_op_pend);
68946 ret = 0;
68947 } else if (object->state == FSCACHE_OBJECT_DYING ||
68948 object->state == FSCACHE_OBJECT_LC_DYING ||
68949 object->state == FSCACHE_OBJECT_WITHDRAWING) {
68950- fscache_stat(&fscache_n_op_rejected);
68951+ fscache_stat_unchecked(&fscache_n_op_rejected);
68952 ret = -ENOBUFS;
68953 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
68954 fscache_report_unexpected_submission(object, op, ostate);
68955@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
68956
68957 ret = -EBUSY;
68958 if (!list_empty(&op->pend_link)) {
68959- fscache_stat(&fscache_n_op_cancelled);
68960+ fscache_stat_unchecked(&fscache_n_op_cancelled);
68961 list_del_init(&op->pend_link);
68962 object->n_ops--;
68963 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
68964@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
68965 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
68966 BUG();
68967
68968- fscache_stat(&fscache_n_op_release);
68969+ fscache_stat_unchecked(&fscache_n_op_release);
68970
68971 if (op->release) {
68972 op->release(op);
68973@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
68974 * lock, and defer it otherwise */
68975 if (!spin_trylock(&object->lock)) {
68976 _debug("defer put");
68977- fscache_stat(&fscache_n_op_deferred_release);
68978+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
68979
68980 cache = object->cache;
68981 spin_lock(&cache->op_gc_list_lock);
68982@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
68983
68984 _debug("GC DEFERRED REL OBJ%x OP%x",
68985 object->debug_id, op->debug_id);
68986- fscache_stat(&fscache_n_op_gc);
68987+ fscache_stat_unchecked(&fscache_n_op_gc);
68988
68989 ASSERTCMP(atomic_read(&op->usage), ==, 0);
68990
68991diff --git a/fs/fscache/page.c b/fs/fscache/page.c
68992index c598ea4..6aac13e 100644
68993--- a/fs/fscache/page.c
68994+++ b/fs/fscache/page.c
68995@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68996 val = radix_tree_lookup(&cookie->stores, page->index);
68997 if (!val) {
68998 rcu_read_unlock();
68999- fscache_stat(&fscache_n_store_vmscan_not_storing);
69000+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
69001 __fscache_uncache_page(cookie, page);
69002 return true;
69003 }
69004@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
69005 spin_unlock(&cookie->stores_lock);
69006
69007 if (xpage) {
69008- fscache_stat(&fscache_n_store_vmscan_cancelled);
69009- fscache_stat(&fscache_n_store_radix_deletes);
69010+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
69011+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
69012 ASSERTCMP(xpage, ==, page);
69013 } else {
69014- fscache_stat(&fscache_n_store_vmscan_gone);
69015+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
69016 }
69017
69018 wake_up_bit(&cookie->flags, 0);
69019@@ -106,7 +106,7 @@ page_busy:
69020 /* we might want to wait here, but that could deadlock the allocator as
69021 * the slow-work threads writing to the cache may all end up sleeping
69022 * on memory allocation */
69023- fscache_stat(&fscache_n_store_vmscan_busy);
69024+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
69025 return false;
69026 }
69027 EXPORT_SYMBOL(__fscache_maybe_release_page);
69028@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
69029 FSCACHE_COOKIE_STORING_TAG);
69030 if (!radix_tree_tag_get(&cookie->stores, page->index,
69031 FSCACHE_COOKIE_PENDING_TAG)) {
69032- fscache_stat(&fscache_n_store_radix_deletes);
69033+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
69034 xpage = radix_tree_delete(&cookie->stores, page->index);
69035 }
69036 spin_unlock(&cookie->stores_lock);
69037@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
69038
69039 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
69040
69041- fscache_stat(&fscache_n_attr_changed_calls);
69042+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
69043
69044 if (fscache_object_is_active(object)) {
69045 fscache_set_op_state(op, "CallFS");
69046@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
69047
69048 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69049
69050- fscache_stat(&fscache_n_attr_changed);
69051+ fscache_stat_unchecked(&fscache_n_attr_changed);
69052
69053 op = kzalloc(sizeof(*op), GFP_KERNEL);
69054 if (!op) {
69055- fscache_stat(&fscache_n_attr_changed_nomem);
69056+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
69057 _leave(" = -ENOMEM");
69058 return -ENOMEM;
69059 }
69060@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
69061 if (fscache_submit_exclusive_op(object, op) < 0)
69062 goto nobufs;
69063 spin_unlock(&cookie->lock);
69064- fscache_stat(&fscache_n_attr_changed_ok);
69065+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
69066 fscache_put_operation(op);
69067 _leave(" = 0");
69068 return 0;
69069@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
69070 nobufs:
69071 spin_unlock(&cookie->lock);
69072 kfree(op);
69073- fscache_stat(&fscache_n_attr_changed_nobufs);
69074+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
69075 _leave(" = %d", -ENOBUFS);
69076 return -ENOBUFS;
69077 }
69078@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
69079 /* allocate a retrieval operation and attempt to submit it */
69080 op = kzalloc(sizeof(*op), GFP_NOIO);
69081 if (!op) {
69082- fscache_stat(&fscache_n_retrievals_nomem);
69083+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69084 return NULL;
69085 }
69086
69087@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
69088 return 0;
69089 }
69090
69091- fscache_stat(&fscache_n_retrievals_wait);
69092+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
69093
69094 jif = jiffies;
69095 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
69096 fscache_wait_bit_interruptible,
69097 TASK_INTERRUPTIBLE) != 0) {
69098- fscache_stat(&fscache_n_retrievals_intr);
69099+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
69100 _leave(" = -ERESTARTSYS");
69101 return -ERESTARTSYS;
69102 }
69103@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
69104 */
69105 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69106 struct fscache_retrieval *op,
69107- atomic_t *stat_op_waits,
69108- atomic_t *stat_object_dead)
69109+ atomic_unchecked_t *stat_op_waits,
69110+ atomic_unchecked_t *stat_object_dead)
69111 {
69112 int ret;
69113
69114@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69115 goto check_if_dead;
69116
69117 _debug(">>> WT");
69118- fscache_stat(stat_op_waits);
69119+ fscache_stat_unchecked(stat_op_waits);
69120 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
69121 fscache_wait_bit_interruptible,
69122 TASK_INTERRUPTIBLE) < 0) {
69123@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69124
69125 check_if_dead:
69126 if (unlikely(fscache_object_is_dead(object))) {
69127- fscache_stat(stat_object_dead);
69128+ fscache_stat_unchecked(stat_object_dead);
69129 return -ENOBUFS;
69130 }
69131 return 0;
69132@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69133
69134 _enter("%p,%p,,,", cookie, page);
69135
69136- fscache_stat(&fscache_n_retrievals);
69137+ fscache_stat_unchecked(&fscache_n_retrievals);
69138
69139 if (hlist_empty(&cookie->backing_objects))
69140 goto nobufs;
69141@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69142 goto nobufs_unlock;
69143 spin_unlock(&cookie->lock);
69144
69145- fscache_stat(&fscache_n_retrieval_ops);
69146+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
69147
69148 /* pin the netfs read context in case we need to do the actual netfs
69149 * read because we've encountered a cache read failure */
69150@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69151
69152 error:
69153 if (ret == -ENOMEM)
69154- fscache_stat(&fscache_n_retrievals_nomem);
69155+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69156 else if (ret == -ERESTARTSYS)
69157- fscache_stat(&fscache_n_retrievals_intr);
69158+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
69159 else if (ret == -ENODATA)
69160- fscache_stat(&fscache_n_retrievals_nodata);
69161+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69162 else if (ret < 0)
69163- fscache_stat(&fscache_n_retrievals_nobufs);
69164+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69165 else
69166- fscache_stat(&fscache_n_retrievals_ok);
69167+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
69168
69169 fscache_put_retrieval(op);
69170 _leave(" = %d", ret);
69171@@ -453,7 +453,7 @@ nobufs_unlock:
69172 spin_unlock(&cookie->lock);
69173 kfree(op);
69174 nobufs:
69175- fscache_stat(&fscache_n_retrievals_nobufs);
69176+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69177 _leave(" = -ENOBUFS");
69178 return -ENOBUFS;
69179 }
69180@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69181
69182 _enter("%p,,%d,,,", cookie, *nr_pages);
69183
69184- fscache_stat(&fscache_n_retrievals);
69185+ fscache_stat_unchecked(&fscache_n_retrievals);
69186
69187 if (hlist_empty(&cookie->backing_objects))
69188 goto nobufs;
69189@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69190 goto nobufs_unlock;
69191 spin_unlock(&cookie->lock);
69192
69193- fscache_stat(&fscache_n_retrieval_ops);
69194+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
69195
69196 /* pin the netfs read context in case we need to do the actual netfs
69197 * read because we've encountered a cache read failure */
69198@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69199
69200 error:
69201 if (ret == -ENOMEM)
69202- fscache_stat(&fscache_n_retrievals_nomem);
69203+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69204 else if (ret == -ERESTARTSYS)
69205- fscache_stat(&fscache_n_retrievals_intr);
69206+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
69207 else if (ret == -ENODATA)
69208- fscache_stat(&fscache_n_retrievals_nodata);
69209+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69210 else if (ret < 0)
69211- fscache_stat(&fscache_n_retrievals_nobufs);
69212+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69213 else
69214- fscache_stat(&fscache_n_retrievals_ok);
69215+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
69216
69217 fscache_put_retrieval(op);
69218 _leave(" = %d", ret);
69219@@ -570,7 +570,7 @@ nobufs_unlock:
69220 spin_unlock(&cookie->lock);
69221 kfree(op);
69222 nobufs:
69223- fscache_stat(&fscache_n_retrievals_nobufs);
69224+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69225 _leave(" = -ENOBUFS");
69226 return -ENOBUFS;
69227 }
69228@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69229
69230 _enter("%p,%p,,,", cookie, page);
69231
69232- fscache_stat(&fscache_n_allocs);
69233+ fscache_stat_unchecked(&fscache_n_allocs);
69234
69235 if (hlist_empty(&cookie->backing_objects))
69236 goto nobufs;
69237@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69238 goto nobufs_unlock;
69239 spin_unlock(&cookie->lock);
69240
69241- fscache_stat(&fscache_n_alloc_ops);
69242+ fscache_stat_unchecked(&fscache_n_alloc_ops);
69243
69244 ret = fscache_wait_for_retrieval_activation(
69245 object, op,
69246@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69247
69248 error:
69249 if (ret == -ERESTARTSYS)
69250- fscache_stat(&fscache_n_allocs_intr);
69251+ fscache_stat_unchecked(&fscache_n_allocs_intr);
69252 else if (ret < 0)
69253- fscache_stat(&fscache_n_allocs_nobufs);
69254+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69255 else
69256- fscache_stat(&fscache_n_allocs_ok);
69257+ fscache_stat_unchecked(&fscache_n_allocs_ok);
69258
69259 fscache_put_retrieval(op);
69260 _leave(" = %d", ret);
69261@@ -651,7 +651,7 @@ nobufs_unlock:
69262 spin_unlock(&cookie->lock);
69263 kfree(op);
69264 nobufs:
69265- fscache_stat(&fscache_n_allocs_nobufs);
69266+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69267 _leave(" = -ENOBUFS");
69268 return -ENOBUFS;
69269 }
69270@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69271
69272 spin_lock(&cookie->stores_lock);
69273
69274- fscache_stat(&fscache_n_store_calls);
69275+ fscache_stat_unchecked(&fscache_n_store_calls);
69276
69277 /* find a page to store */
69278 page = NULL;
69279@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69280 page = results[0];
69281 _debug("gang %d [%lx]", n, page->index);
69282 if (page->index > op->store_limit) {
69283- fscache_stat(&fscache_n_store_pages_over_limit);
69284+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
69285 goto superseded;
69286 }
69287
69288@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69289
69290 if (page) {
69291 fscache_set_op_state(&op->op, "Store");
69292- fscache_stat(&fscache_n_store_pages);
69293+ fscache_stat_unchecked(&fscache_n_store_pages);
69294 fscache_stat(&fscache_n_cop_write_page);
69295 ret = object->cache->ops->write_page(op, page);
69296 fscache_stat_d(&fscache_n_cop_write_page);
69297@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69298 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69299 ASSERT(PageFsCache(page));
69300
69301- fscache_stat(&fscache_n_stores);
69302+ fscache_stat_unchecked(&fscache_n_stores);
69303
69304 op = kzalloc(sizeof(*op), GFP_NOIO);
69305 if (!op)
69306@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69307 spin_unlock(&cookie->stores_lock);
69308 spin_unlock(&object->lock);
69309
69310- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
69311+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
69312 op->store_limit = object->store_limit;
69313
69314 if (fscache_submit_op(object, &op->op) < 0)
69315@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69316
69317 spin_unlock(&cookie->lock);
69318 radix_tree_preload_end();
69319- fscache_stat(&fscache_n_store_ops);
69320- fscache_stat(&fscache_n_stores_ok);
69321+ fscache_stat_unchecked(&fscache_n_store_ops);
69322+ fscache_stat_unchecked(&fscache_n_stores_ok);
69323
69324 /* the slow work queue now carries its own ref on the object */
69325 fscache_put_operation(&op->op);
69326@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69327 return 0;
69328
69329 already_queued:
69330- fscache_stat(&fscache_n_stores_again);
69331+ fscache_stat_unchecked(&fscache_n_stores_again);
69332 already_pending:
69333 spin_unlock(&cookie->stores_lock);
69334 spin_unlock(&object->lock);
69335 spin_unlock(&cookie->lock);
69336 radix_tree_preload_end();
69337 kfree(op);
69338- fscache_stat(&fscache_n_stores_ok);
69339+ fscache_stat_unchecked(&fscache_n_stores_ok);
69340 _leave(" = 0");
69341 return 0;
69342
69343@@ -886,14 +886,14 @@ nobufs:
69344 spin_unlock(&cookie->lock);
69345 radix_tree_preload_end();
69346 kfree(op);
69347- fscache_stat(&fscache_n_stores_nobufs);
69348+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
69349 _leave(" = -ENOBUFS");
69350 return -ENOBUFS;
69351
69352 nomem_free:
69353 kfree(op);
69354 nomem:
69355- fscache_stat(&fscache_n_stores_oom);
69356+ fscache_stat_unchecked(&fscache_n_stores_oom);
69357 _leave(" = -ENOMEM");
69358 return -ENOMEM;
69359 }
69360@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
69361 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69362 ASSERTCMP(page, !=, NULL);
69363
69364- fscache_stat(&fscache_n_uncaches);
69365+ fscache_stat_unchecked(&fscache_n_uncaches);
69366
69367 /* cache withdrawal may beat us to it */
69368 if (!PageFsCache(page))
69369@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
69370 unsigned long loop;
69371
69372 #ifdef CONFIG_FSCACHE_STATS
69373- atomic_add(pagevec->nr, &fscache_n_marks);
69374+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
69375 #endif
69376
69377 for (loop = 0; loop < pagevec->nr; loop++) {
69378diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
69379index 46435f3..8cddf18 100644
69380--- a/fs/fscache/stats.c
69381+++ b/fs/fscache/stats.c
69382@@ -18,95 +18,95 @@
69383 /*
69384 * operation counters
69385 */
69386-atomic_t fscache_n_op_pend;
69387-atomic_t fscache_n_op_run;
69388-atomic_t fscache_n_op_enqueue;
69389-atomic_t fscache_n_op_requeue;
69390-atomic_t fscache_n_op_deferred_release;
69391-atomic_t fscache_n_op_release;
69392-atomic_t fscache_n_op_gc;
69393-atomic_t fscache_n_op_cancelled;
69394-atomic_t fscache_n_op_rejected;
69395+atomic_unchecked_t fscache_n_op_pend;
69396+atomic_unchecked_t fscache_n_op_run;
69397+atomic_unchecked_t fscache_n_op_enqueue;
69398+atomic_unchecked_t fscache_n_op_requeue;
69399+atomic_unchecked_t fscache_n_op_deferred_release;
69400+atomic_unchecked_t fscache_n_op_release;
69401+atomic_unchecked_t fscache_n_op_gc;
69402+atomic_unchecked_t fscache_n_op_cancelled;
69403+atomic_unchecked_t fscache_n_op_rejected;
69404
69405-atomic_t fscache_n_attr_changed;
69406-atomic_t fscache_n_attr_changed_ok;
69407-atomic_t fscache_n_attr_changed_nobufs;
69408-atomic_t fscache_n_attr_changed_nomem;
69409-atomic_t fscache_n_attr_changed_calls;
69410+atomic_unchecked_t fscache_n_attr_changed;
69411+atomic_unchecked_t fscache_n_attr_changed_ok;
69412+atomic_unchecked_t fscache_n_attr_changed_nobufs;
69413+atomic_unchecked_t fscache_n_attr_changed_nomem;
69414+atomic_unchecked_t fscache_n_attr_changed_calls;
69415
69416-atomic_t fscache_n_allocs;
69417-atomic_t fscache_n_allocs_ok;
69418-atomic_t fscache_n_allocs_wait;
69419-atomic_t fscache_n_allocs_nobufs;
69420-atomic_t fscache_n_allocs_intr;
69421-atomic_t fscache_n_allocs_object_dead;
69422-atomic_t fscache_n_alloc_ops;
69423-atomic_t fscache_n_alloc_op_waits;
69424+atomic_unchecked_t fscache_n_allocs;
69425+atomic_unchecked_t fscache_n_allocs_ok;
69426+atomic_unchecked_t fscache_n_allocs_wait;
69427+atomic_unchecked_t fscache_n_allocs_nobufs;
69428+atomic_unchecked_t fscache_n_allocs_intr;
69429+atomic_unchecked_t fscache_n_allocs_object_dead;
69430+atomic_unchecked_t fscache_n_alloc_ops;
69431+atomic_unchecked_t fscache_n_alloc_op_waits;
69432
69433-atomic_t fscache_n_retrievals;
69434-atomic_t fscache_n_retrievals_ok;
69435-atomic_t fscache_n_retrievals_wait;
69436-atomic_t fscache_n_retrievals_nodata;
69437-atomic_t fscache_n_retrievals_nobufs;
69438-atomic_t fscache_n_retrievals_intr;
69439-atomic_t fscache_n_retrievals_nomem;
69440-atomic_t fscache_n_retrievals_object_dead;
69441-atomic_t fscache_n_retrieval_ops;
69442-atomic_t fscache_n_retrieval_op_waits;
69443+atomic_unchecked_t fscache_n_retrievals;
69444+atomic_unchecked_t fscache_n_retrievals_ok;
69445+atomic_unchecked_t fscache_n_retrievals_wait;
69446+atomic_unchecked_t fscache_n_retrievals_nodata;
69447+atomic_unchecked_t fscache_n_retrievals_nobufs;
69448+atomic_unchecked_t fscache_n_retrievals_intr;
69449+atomic_unchecked_t fscache_n_retrievals_nomem;
69450+atomic_unchecked_t fscache_n_retrievals_object_dead;
69451+atomic_unchecked_t fscache_n_retrieval_ops;
69452+atomic_unchecked_t fscache_n_retrieval_op_waits;
69453
69454-atomic_t fscache_n_stores;
69455-atomic_t fscache_n_stores_ok;
69456-atomic_t fscache_n_stores_again;
69457-atomic_t fscache_n_stores_nobufs;
69458-atomic_t fscache_n_stores_oom;
69459-atomic_t fscache_n_store_ops;
69460-atomic_t fscache_n_store_calls;
69461-atomic_t fscache_n_store_pages;
69462-atomic_t fscache_n_store_radix_deletes;
69463-atomic_t fscache_n_store_pages_over_limit;
69464+atomic_unchecked_t fscache_n_stores;
69465+atomic_unchecked_t fscache_n_stores_ok;
69466+atomic_unchecked_t fscache_n_stores_again;
69467+atomic_unchecked_t fscache_n_stores_nobufs;
69468+atomic_unchecked_t fscache_n_stores_oom;
69469+atomic_unchecked_t fscache_n_store_ops;
69470+atomic_unchecked_t fscache_n_store_calls;
69471+atomic_unchecked_t fscache_n_store_pages;
69472+atomic_unchecked_t fscache_n_store_radix_deletes;
69473+atomic_unchecked_t fscache_n_store_pages_over_limit;
69474
69475-atomic_t fscache_n_store_vmscan_not_storing;
69476-atomic_t fscache_n_store_vmscan_gone;
69477-atomic_t fscache_n_store_vmscan_busy;
69478-atomic_t fscache_n_store_vmscan_cancelled;
69479+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
69480+atomic_unchecked_t fscache_n_store_vmscan_gone;
69481+atomic_unchecked_t fscache_n_store_vmscan_busy;
69482+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
69483
69484-atomic_t fscache_n_marks;
69485-atomic_t fscache_n_uncaches;
69486+atomic_unchecked_t fscache_n_marks;
69487+atomic_unchecked_t fscache_n_uncaches;
69488
69489-atomic_t fscache_n_acquires;
69490-atomic_t fscache_n_acquires_null;
69491-atomic_t fscache_n_acquires_no_cache;
69492-atomic_t fscache_n_acquires_ok;
69493-atomic_t fscache_n_acquires_nobufs;
69494-atomic_t fscache_n_acquires_oom;
69495+atomic_unchecked_t fscache_n_acquires;
69496+atomic_unchecked_t fscache_n_acquires_null;
69497+atomic_unchecked_t fscache_n_acquires_no_cache;
69498+atomic_unchecked_t fscache_n_acquires_ok;
69499+atomic_unchecked_t fscache_n_acquires_nobufs;
69500+atomic_unchecked_t fscache_n_acquires_oom;
69501
69502-atomic_t fscache_n_updates;
69503-atomic_t fscache_n_updates_null;
69504-atomic_t fscache_n_updates_run;
69505+atomic_unchecked_t fscache_n_updates;
69506+atomic_unchecked_t fscache_n_updates_null;
69507+atomic_unchecked_t fscache_n_updates_run;
69508
69509-atomic_t fscache_n_relinquishes;
69510-atomic_t fscache_n_relinquishes_null;
69511-atomic_t fscache_n_relinquishes_waitcrt;
69512-atomic_t fscache_n_relinquishes_retire;
69513+atomic_unchecked_t fscache_n_relinquishes;
69514+atomic_unchecked_t fscache_n_relinquishes_null;
69515+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
69516+atomic_unchecked_t fscache_n_relinquishes_retire;
69517
69518-atomic_t fscache_n_cookie_index;
69519-atomic_t fscache_n_cookie_data;
69520-atomic_t fscache_n_cookie_special;
69521+atomic_unchecked_t fscache_n_cookie_index;
69522+atomic_unchecked_t fscache_n_cookie_data;
69523+atomic_unchecked_t fscache_n_cookie_special;
69524
69525-atomic_t fscache_n_object_alloc;
69526-atomic_t fscache_n_object_no_alloc;
69527-atomic_t fscache_n_object_lookups;
69528-atomic_t fscache_n_object_lookups_negative;
69529-atomic_t fscache_n_object_lookups_positive;
69530-atomic_t fscache_n_object_lookups_timed_out;
69531-atomic_t fscache_n_object_created;
69532-atomic_t fscache_n_object_avail;
69533-atomic_t fscache_n_object_dead;
69534+atomic_unchecked_t fscache_n_object_alloc;
69535+atomic_unchecked_t fscache_n_object_no_alloc;
69536+atomic_unchecked_t fscache_n_object_lookups;
69537+atomic_unchecked_t fscache_n_object_lookups_negative;
69538+atomic_unchecked_t fscache_n_object_lookups_positive;
69539+atomic_unchecked_t fscache_n_object_lookups_timed_out;
69540+atomic_unchecked_t fscache_n_object_created;
69541+atomic_unchecked_t fscache_n_object_avail;
69542+atomic_unchecked_t fscache_n_object_dead;
69543
69544-atomic_t fscache_n_checkaux_none;
69545-atomic_t fscache_n_checkaux_okay;
69546-atomic_t fscache_n_checkaux_update;
69547-atomic_t fscache_n_checkaux_obsolete;
69548+atomic_unchecked_t fscache_n_checkaux_none;
69549+atomic_unchecked_t fscache_n_checkaux_okay;
69550+atomic_unchecked_t fscache_n_checkaux_update;
69551+atomic_unchecked_t fscache_n_checkaux_obsolete;
69552
69553 atomic_t fscache_n_cop_alloc_object;
69554 atomic_t fscache_n_cop_lookup_object;
69555@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
69556 seq_puts(m, "FS-Cache statistics\n");
69557
69558 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
69559- atomic_read(&fscache_n_cookie_index),
69560- atomic_read(&fscache_n_cookie_data),
69561- atomic_read(&fscache_n_cookie_special));
69562+ atomic_read_unchecked(&fscache_n_cookie_index),
69563+ atomic_read_unchecked(&fscache_n_cookie_data),
69564+ atomic_read_unchecked(&fscache_n_cookie_special));
69565
69566 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
69567- atomic_read(&fscache_n_object_alloc),
69568- atomic_read(&fscache_n_object_no_alloc),
69569- atomic_read(&fscache_n_object_avail),
69570- atomic_read(&fscache_n_object_dead));
69571+ atomic_read_unchecked(&fscache_n_object_alloc),
69572+ atomic_read_unchecked(&fscache_n_object_no_alloc),
69573+ atomic_read_unchecked(&fscache_n_object_avail),
69574+ atomic_read_unchecked(&fscache_n_object_dead));
69575 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
69576- atomic_read(&fscache_n_checkaux_none),
69577- atomic_read(&fscache_n_checkaux_okay),
69578- atomic_read(&fscache_n_checkaux_update),
69579- atomic_read(&fscache_n_checkaux_obsolete));
69580+ atomic_read_unchecked(&fscache_n_checkaux_none),
69581+ atomic_read_unchecked(&fscache_n_checkaux_okay),
69582+ atomic_read_unchecked(&fscache_n_checkaux_update),
69583+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
69584
69585 seq_printf(m, "Pages : mrk=%u unc=%u\n",
69586- atomic_read(&fscache_n_marks),
69587- atomic_read(&fscache_n_uncaches));
69588+ atomic_read_unchecked(&fscache_n_marks),
69589+ atomic_read_unchecked(&fscache_n_uncaches));
69590
69591 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
69592 " oom=%u\n",
69593- atomic_read(&fscache_n_acquires),
69594- atomic_read(&fscache_n_acquires_null),
69595- atomic_read(&fscache_n_acquires_no_cache),
69596- atomic_read(&fscache_n_acquires_ok),
69597- atomic_read(&fscache_n_acquires_nobufs),
69598- atomic_read(&fscache_n_acquires_oom));
69599+ atomic_read_unchecked(&fscache_n_acquires),
69600+ atomic_read_unchecked(&fscache_n_acquires_null),
69601+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
69602+ atomic_read_unchecked(&fscache_n_acquires_ok),
69603+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
69604+ atomic_read_unchecked(&fscache_n_acquires_oom));
69605
69606 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
69607- atomic_read(&fscache_n_object_lookups),
69608- atomic_read(&fscache_n_object_lookups_negative),
69609- atomic_read(&fscache_n_object_lookups_positive),
69610- atomic_read(&fscache_n_object_lookups_timed_out),
69611- atomic_read(&fscache_n_object_created));
69612+ atomic_read_unchecked(&fscache_n_object_lookups),
69613+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
69614+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
69615+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
69616+ atomic_read_unchecked(&fscache_n_object_created));
69617
69618 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
69619- atomic_read(&fscache_n_updates),
69620- atomic_read(&fscache_n_updates_null),
69621- atomic_read(&fscache_n_updates_run));
69622+ atomic_read_unchecked(&fscache_n_updates),
69623+ atomic_read_unchecked(&fscache_n_updates_null),
69624+ atomic_read_unchecked(&fscache_n_updates_run));
69625
69626 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
69627- atomic_read(&fscache_n_relinquishes),
69628- atomic_read(&fscache_n_relinquishes_null),
69629- atomic_read(&fscache_n_relinquishes_waitcrt),
69630- atomic_read(&fscache_n_relinquishes_retire));
69631+ atomic_read_unchecked(&fscache_n_relinquishes),
69632+ atomic_read_unchecked(&fscache_n_relinquishes_null),
69633+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
69634+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
69635
69636 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
69637- atomic_read(&fscache_n_attr_changed),
69638- atomic_read(&fscache_n_attr_changed_ok),
69639- atomic_read(&fscache_n_attr_changed_nobufs),
69640- atomic_read(&fscache_n_attr_changed_nomem),
69641- atomic_read(&fscache_n_attr_changed_calls));
69642+ atomic_read_unchecked(&fscache_n_attr_changed),
69643+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
69644+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
69645+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
69646+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
69647
69648 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
69649- atomic_read(&fscache_n_allocs),
69650- atomic_read(&fscache_n_allocs_ok),
69651- atomic_read(&fscache_n_allocs_wait),
69652- atomic_read(&fscache_n_allocs_nobufs),
69653- atomic_read(&fscache_n_allocs_intr));
69654+ atomic_read_unchecked(&fscache_n_allocs),
69655+ atomic_read_unchecked(&fscache_n_allocs_ok),
69656+ atomic_read_unchecked(&fscache_n_allocs_wait),
69657+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
69658+ atomic_read_unchecked(&fscache_n_allocs_intr));
69659 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
69660- atomic_read(&fscache_n_alloc_ops),
69661- atomic_read(&fscache_n_alloc_op_waits),
69662- atomic_read(&fscache_n_allocs_object_dead));
69663+ atomic_read_unchecked(&fscache_n_alloc_ops),
69664+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
69665+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
69666
69667 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
69668 " int=%u oom=%u\n",
69669- atomic_read(&fscache_n_retrievals),
69670- atomic_read(&fscache_n_retrievals_ok),
69671- atomic_read(&fscache_n_retrievals_wait),
69672- atomic_read(&fscache_n_retrievals_nodata),
69673- atomic_read(&fscache_n_retrievals_nobufs),
69674- atomic_read(&fscache_n_retrievals_intr),
69675- atomic_read(&fscache_n_retrievals_nomem));
69676+ atomic_read_unchecked(&fscache_n_retrievals),
69677+ atomic_read_unchecked(&fscache_n_retrievals_ok),
69678+ atomic_read_unchecked(&fscache_n_retrievals_wait),
69679+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
69680+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
69681+ atomic_read_unchecked(&fscache_n_retrievals_intr),
69682+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
69683 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
69684- atomic_read(&fscache_n_retrieval_ops),
69685- atomic_read(&fscache_n_retrieval_op_waits),
69686- atomic_read(&fscache_n_retrievals_object_dead));
69687+ atomic_read_unchecked(&fscache_n_retrieval_ops),
69688+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
69689+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
69690
69691 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
69692- atomic_read(&fscache_n_stores),
69693- atomic_read(&fscache_n_stores_ok),
69694- atomic_read(&fscache_n_stores_again),
69695- atomic_read(&fscache_n_stores_nobufs),
69696- atomic_read(&fscache_n_stores_oom));
69697+ atomic_read_unchecked(&fscache_n_stores),
69698+ atomic_read_unchecked(&fscache_n_stores_ok),
69699+ atomic_read_unchecked(&fscache_n_stores_again),
69700+ atomic_read_unchecked(&fscache_n_stores_nobufs),
69701+ atomic_read_unchecked(&fscache_n_stores_oom));
69702 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
69703- atomic_read(&fscache_n_store_ops),
69704- atomic_read(&fscache_n_store_calls),
69705- atomic_read(&fscache_n_store_pages),
69706- atomic_read(&fscache_n_store_radix_deletes),
69707- atomic_read(&fscache_n_store_pages_over_limit));
69708+ atomic_read_unchecked(&fscache_n_store_ops),
69709+ atomic_read_unchecked(&fscache_n_store_calls),
69710+ atomic_read_unchecked(&fscache_n_store_pages),
69711+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
69712+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
69713
69714 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
69715- atomic_read(&fscache_n_store_vmscan_not_storing),
69716- atomic_read(&fscache_n_store_vmscan_gone),
69717- atomic_read(&fscache_n_store_vmscan_busy),
69718- atomic_read(&fscache_n_store_vmscan_cancelled));
69719+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
69720+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
69721+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
69722+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
69723
69724 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
69725- atomic_read(&fscache_n_op_pend),
69726- atomic_read(&fscache_n_op_run),
69727- atomic_read(&fscache_n_op_enqueue),
69728- atomic_read(&fscache_n_op_cancelled),
69729- atomic_read(&fscache_n_op_rejected));
69730+ atomic_read_unchecked(&fscache_n_op_pend),
69731+ atomic_read_unchecked(&fscache_n_op_run),
69732+ atomic_read_unchecked(&fscache_n_op_enqueue),
69733+ atomic_read_unchecked(&fscache_n_op_cancelled),
69734+ atomic_read_unchecked(&fscache_n_op_rejected));
69735 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
69736- atomic_read(&fscache_n_op_deferred_release),
69737- atomic_read(&fscache_n_op_release),
69738- atomic_read(&fscache_n_op_gc));
69739+ atomic_read_unchecked(&fscache_n_op_deferred_release),
69740+ atomic_read_unchecked(&fscache_n_op_release),
69741+ atomic_read_unchecked(&fscache_n_op_gc));
69742
69743 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
69744 atomic_read(&fscache_n_cop_alloc_object),
69745diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
69746index de792dc..448b532 100644
69747--- a/fs/fuse/cuse.c
69748+++ b/fs/fuse/cuse.c
69749@@ -576,10 +576,12 @@ static int __init cuse_init(void)
69750 INIT_LIST_HEAD(&cuse_conntbl[i]);
69751
69752 /* inherit and extend fuse_dev_operations */
69753- cuse_channel_fops = fuse_dev_operations;
69754- cuse_channel_fops.owner = THIS_MODULE;
69755- cuse_channel_fops.open = cuse_channel_open;
69756- cuse_channel_fops.release = cuse_channel_release;
69757+ pax_open_kernel();
69758+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
69759+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
69760+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
69761+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
69762+ pax_close_kernel();
69763
69764 cuse_class = class_create(THIS_MODULE, "cuse");
69765 if (IS_ERR(cuse_class))
69766diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
69767index 1facb39..7f48557 100644
69768--- a/fs/fuse/dev.c
69769+++ b/fs/fuse/dev.c
69770@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69771 {
69772 struct fuse_notify_inval_entry_out outarg;
69773 int err = -EINVAL;
69774- char buf[FUSE_NAME_MAX+1];
69775+ char *buf = NULL;
69776 struct qstr name;
69777
69778 if (size < sizeof(outarg))
69779@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69780 if (outarg.namelen > FUSE_NAME_MAX)
69781 goto err;
69782
69783+ err = -ENOMEM;
69784+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
69785+ if (!buf)
69786+ goto err;
69787+
69788 err = -EINVAL;
69789 if (size != sizeof(outarg) + outarg.namelen + 1)
69790 goto err;
69791@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69792
69793 down_read(&fc->killsb);
69794 err = -ENOENT;
69795- if (!fc->sb)
69796- goto err_unlock;
69797-
69798- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69799-
69800-err_unlock:
69801+ if (fc->sb)
69802+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69803 up_read(&fc->killsb);
69804+ kfree(buf);
69805 return err;
69806
69807 err:
69808 fuse_copy_finish(cs);
69809+ kfree(buf);
69810 return err;
69811 }
69812
69813diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
69814index 4787ae6..73efff7 100644
69815--- a/fs/fuse/dir.c
69816+++ b/fs/fuse/dir.c
69817@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
69818 return link;
69819 }
69820
69821-static void free_link(char *link)
69822+static void free_link(const char *link)
69823 {
69824 if (!IS_ERR(link))
69825 free_page((unsigned long) link);
69826diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
69827index 247436c..e650ccb 100644
69828--- a/fs/gfs2/ops_inode.c
69829+++ b/fs/gfs2/ops_inode.c
69830@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
69831 unsigned int x;
69832 int error;
69833
69834+ pax_track_stack();
69835+
69836 if (ndentry->d_inode) {
69837 nip = GFS2_I(ndentry->d_inode);
69838 if (ip == nip)
69839diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
69840index 4463297..4fed53b 100644
69841--- a/fs/gfs2/sys.c
69842+++ b/fs/gfs2/sys.c
69843@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
69844 return a->store ? a->store(sdp, buf, len) : len;
69845 }
69846
69847-static struct sysfs_ops gfs2_attr_ops = {
69848+static const struct sysfs_ops gfs2_attr_ops = {
69849 .show = gfs2_attr_show,
69850 .store = gfs2_attr_store,
69851 };
69852@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
69853 return 0;
69854 }
69855
69856-static struct kset_uevent_ops gfs2_uevent_ops = {
69857+static const struct kset_uevent_ops gfs2_uevent_ops = {
69858 .uevent = gfs2_uevent,
69859 };
69860
69861diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
69862index f6874ac..7cd98a8 100644
69863--- a/fs/hfsplus/catalog.c
69864+++ b/fs/hfsplus/catalog.c
69865@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
69866 int err;
69867 u16 type;
69868
69869+ pax_track_stack();
69870+
69871 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
69872 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
69873 if (err)
69874@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
69875 int entry_size;
69876 int err;
69877
69878+ pax_track_stack();
69879+
69880 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
69881 sb = dir->i_sb;
69882 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
69883@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
69884 int entry_size, type;
69885 int err = 0;
69886
69887+ pax_track_stack();
69888+
69889 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
69890 dst_dir->i_ino, dst_name->name);
69891 sb = src_dir->i_sb;
69892diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
69893index 5f40236..dac3421 100644
69894--- a/fs/hfsplus/dir.c
69895+++ b/fs/hfsplus/dir.c
69896@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
69897 struct hfsplus_readdir_data *rd;
69898 u16 type;
69899
69900+ pax_track_stack();
69901+
69902 if (filp->f_pos >= inode->i_size)
69903 return 0;
69904
69905diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
69906index 1bcf597..905a251 100644
69907--- a/fs/hfsplus/inode.c
69908+++ b/fs/hfsplus/inode.c
69909@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
69910 int res = 0;
69911 u16 type;
69912
69913+ pax_track_stack();
69914+
69915 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
69916
69917 HFSPLUS_I(inode).dev = 0;
69918@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
69919 struct hfs_find_data fd;
69920 hfsplus_cat_entry entry;
69921
69922+ pax_track_stack();
69923+
69924 if (HFSPLUS_IS_RSRC(inode))
69925 main_inode = HFSPLUS_I(inode).rsrc_inode;
69926
69927diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
69928index f457d2c..7ef4ad5 100644
69929--- a/fs/hfsplus/ioctl.c
69930+++ b/fs/hfsplus/ioctl.c
69931@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
69932 struct hfsplus_cat_file *file;
69933 int res;
69934
69935+ pax_track_stack();
69936+
69937 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69938 return -EOPNOTSUPP;
69939
69940@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
69941 struct hfsplus_cat_file *file;
69942 ssize_t res = 0;
69943
69944+ pax_track_stack();
69945+
69946 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69947 return -EOPNOTSUPP;
69948
69949diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
69950index 43022f3..7298079 100644
69951--- a/fs/hfsplus/super.c
69952+++ b/fs/hfsplus/super.c
69953@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
69954 struct nls_table *nls = NULL;
69955 int err = -EINVAL;
69956
69957+ pax_track_stack();
69958+
69959 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
69960 if (!sbi)
69961 return -ENOMEM;
69962diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
69963index 87a1258..5694d91 100644
69964--- a/fs/hugetlbfs/inode.c
69965+++ b/fs/hugetlbfs/inode.c
69966@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
69967 .kill_sb = kill_litter_super,
69968 };
69969
69970-static struct vfsmount *hugetlbfs_vfsmount;
69971+struct vfsmount *hugetlbfs_vfsmount;
69972
69973 static int can_do_hugetlb_shm(void)
69974 {
69975diff --git a/fs/ioctl.c b/fs/ioctl.c
69976index 6c75110..19d2c3c 100644
69977--- a/fs/ioctl.c
69978+++ b/fs/ioctl.c
69979@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
69980 u64 phys, u64 len, u32 flags)
69981 {
69982 struct fiemap_extent extent;
69983- struct fiemap_extent *dest = fieinfo->fi_extents_start;
69984+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
69985
69986 /* only count the extents */
69987 if (fieinfo->fi_extents_max == 0) {
69988@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69989
69990 fieinfo.fi_flags = fiemap.fm_flags;
69991 fieinfo.fi_extents_max = fiemap.fm_extent_count;
69992- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
69993+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
69994
69995 if (fiemap.fm_extent_count != 0 &&
69996 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
69997@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69998 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
69999 fiemap.fm_flags = fieinfo.fi_flags;
70000 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
70001- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
70002+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
70003 error = -EFAULT;
70004
70005 return error;
70006diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
70007index b0435dd..81ee0be 100644
70008--- a/fs/jbd/checkpoint.c
70009+++ b/fs/jbd/checkpoint.c
70010@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
70011 tid_t this_tid;
70012 int result;
70013
70014+ pax_track_stack();
70015+
70016 jbd_debug(1, "Start checkpoint\n");
70017
70018 /*
70019diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
70020index 546d153..736896c 100644
70021--- a/fs/jffs2/compr_rtime.c
70022+++ b/fs/jffs2/compr_rtime.c
70023@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
70024 int outpos = 0;
70025 int pos=0;
70026
70027+ pax_track_stack();
70028+
70029 memset(positions,0,sizeof(positions));
70030
70031 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
70032@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
70033 int outpos = 0;
70034 int pos=0;
70035
70036+ pax_track_stack();
70037+
70038 memset(positions,0,sizeof(positions));
70039
70040 while (outpos<destlen) {
70041diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
70042index 170d289..3254b98 100644
70043--- a/fs/jffs2/compr_rubin.c
70044+++ b/fs/jffs2/compr_rubin.c
70045@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
70046 int ret;
70047 uint32_t mysrclen, mydstlen;
70048
70049+ pax_track_stack();
70050+
70051 mysrclen = *sourcelen;
70052 mydstlen = *dstlen - 8;
70053
70054diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
70055index b47679b..00d65d3 100644
70056--- a/fs/jffs2/erase.c
70057+++ b/fs/jffs2/erase.c
70058@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
70059 struct jffs2_unknown_node marker = {
70060 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
70061 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
70062- .totlen = cpu_to_je32(c->cleanmarker_size)
70063+ .totlen = cpu_to_je32(c->cleanmarker_size),
70064+ .hdr_crc = cpu_to_je32(0)
70065 };
70066
70067 jffs2_prealloc_raw_node_refs(c, jeb, 1);
70068diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
70069index 5ef7bac..4fd1e3c 100644
70070--- a/fs/jffs2/wbuf.c
70071+++ b/fs/jffs2/wbuf.c
70072@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
70073 {
70074 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
70075 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
70076- .totlen = constant_cpu_to_je32(8)
70077+ .totlen = constant_cpu_to_je32(8),
70078+ .hdr_crc = constant_cpu_to_je32(0)
70079 };
70080
70081 /*
70082diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
70083index 082e844..52012a1 100644
70084--- a/fs/jffs2/xattr.c
70085+++ b/fs/jffs2/xattr.c
70086@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
70087
70088 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
70089
70090+ pax_track_stack();
70091+
70092 /* Phase.1 : Merge same xref */
70093 for (i=0; i < XREF_TMPHASH_SIZE; i++)
70094 xref_tmphash[i] = NULL;
70095diff --git a/fs/jfs/super.c b/fs/jfs/super.c
70096index 2234c73..f6e6e6b 100644
70097--- a/fs/jfs/super.c
70098+++ b/fs/jfs/super.c
70099@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
70100
70101 jfs_inode_cachep =
70102 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
70103- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
70104+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
70105 init_once);
70106 if (jfs_inode_cachep == NULL)
70107 return -ENOMEM;
70108diff --git a/fs/libfs.c b/fs/libfs.c
70109index ba36e93..3153fce 100644
70110--- a/fs/libfs.c
70111+++ b/fs/libfs.c
70112@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
70113
70114 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
70115 struct dentry *next;
70116+ char d_name[sizeof(next->d_iname)];
70117+ const unsigned char *name;
70118+
70119 next = list_entry(p, struct dentry, d_u.d_child);
70120 if (d_unhashed(next) || !next->d_inode)
70121 continue;
70122
70123 spin_unlock(&dcache_lock);
70124- if (filldir(dirent, next->d_name.name,
70125+ name = next->d_name.name;
70126+ if (name == next->d_iname) {
70127+ memcpy(d_name, name, next->d_name.len);
70128+ name = d_name;
70129+ }
70130+ if (filldir(dirent, name,
70131 next->d_name.len, filp->f_pos,
70132 next->d_inode->i_ino,
70133 dt_type(next->d_inode)) < 0)
70134diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
70135index c325a83..d15b07b 100644
70136--- a/fs/lockd/clntproc.c
70137+++ b/fs/lockd/clntproc.c
70138@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
70139 /*
70140 * Cookie counter for NLM requests
70141 */
70142-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
70143+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
70144
70145 void nlmclnt_next_cookie(struct nlm_cookie *c)
70146 {
70147- u32 cookie = atomic_inc_return(&nlm_cookie);
70148+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
70149
70150 memcpy(c->data, &cookie, 4);
70151 c->len=4;
70152@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
70153 struct nlm_rqst reqst, *req;
70154 int status;
70155
70156+ pax_track_stack();
70157+
70158 req = &reqst;
70159 memset(req, 0, sizeof(*req));
70160 locks_init_lock(&req->a_args.lock.fl);
70161diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
70162index 1a54ae1..6a16c27 100644
70163--- a/fs/lockd/svc.c
70164+++ b/fs/lockd/svc.c
70165@@ -43,7 +43,7 @@
70166
70167 static struct svc_program nlmsvc_program;
70168
70169-struct nlmsvc_binding * nlmsvc_ops;
70170+const struct nlmsvc_binding * nlmsvc_ops;
70171 EXPORT_SYMBOL_GPL(nlmsvc_ops);
70172
70173 static DEFINE_MUTEX(nlmsvc_mutex);
70174diff --git a/fs/locks.c b/fs/locks.c
70175index a8794f2..4041e55 100644
70176--- a/fs/locks.c
70177+++ b/fs/locks.c
70178@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
70179
70180 static struct kmem_cache *filelock_cache __read_mostly;
70181
70182+static void locks_init_lock_always(struct file_lock *fl)
70183+{
70184+ fl->fl_next = NULL;
70185+ fl->fl_fasync = NULL;
70186+ fl->fl_owner = NULL;
70187+ fl->fl_pid = 0;
70188+ fl->fl_nspid = NULL;
70189+ fl->fl_file = NULL;
70190+ fl->fl_flags = 0;
70191+ fl->fl_type = 0;
70192+ fl->fl_start = fl->fl_end = 0;
70193+}
70194+
70195 /* Allocate an empty lock structure. */
70196 static struct file_lock *locks_alloc_lock(void)
70197 {
70198- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70199+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70200+
70201+ if (fl)
70202+ locks_init_lock_always(fl);
70203+
70204+ return fl;
70205 }
70206
70207 void locks_release_private(struct file_lock *fl)
70208@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
70209 INIT_LIST_HEAD(&fl->fl_link);
70210 INIT_LIST_HEAD(&fl->fl_block);
70211 init_waitqueue_head(&fl->fl_wait);
70212- fl->fl_next = NULL;
70213- fl->fl_fasync = NULL;
70214- fl->fl_owner = NULL;
70215- fl->fl_pid = 0;
70216- fl->fl_nspid = NULL;
70217- fl->fl_file = NULL;
70218- fl->fl_flags = 0;
70219- fl->fl_type = 0;
70220- fl->fl_start = fl->fl_end = 0;
70221 fl->fl_ops = NULL;
70222 fl->fl_lmops = NULL;
70223+ locks_init_lock_always(fl);
70224 }
70225
70226 EXPORT_SYMBOL(locks_init_lock);
70227@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
70228 return;
70229
70230 if (filp->f_op && filp->f_op->flock) {
70231- struct file_lock fl = {
70232+ struct file_lock flock = {
70233 .fl_pid = current->tgid,
70234 .fl_file = filp,
70235 .fl_flags = FL_FLOCK,
70236 .fl_type = F_UNLCK,
70237 .fl_end = OFFSET_MAX,
70238 };
70239- filp->f_op->flock(filp, F_SETLKW, &fl);
70240- if (fl.fl_ops && fl.fl_ops->fl_release_private)
70241- fl.fl_ops->fl_release_private(&fl);
70242+ filp->f_op->flock(filp, F_SETLKW, &flock);
70243+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
70244+ flock.fl_ops->fl_release_private(&flock);
70245 }
70246
70247 lock_kernel();
70248diff --git a/fs/mbcache.c b/fs/mbcache.c
70249index ec88ff3..b843a82 100644
70250--- a/fs/mbcache.c
70251+++ b/fs/mbcache.c
70252@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
70253 if (!cache)
70254 goto fail;
70255 cache->c_name = name;
70256- cache->c_op.free = NULL;
70257+ *(void **)&cache->c_op.free = NULL;
70258 if (cache_op)
70259- cache->c_op.free = cache_op->free;
70260+ *(void **)&cache->c_op.free = cache_op->free;
70261 atomic_set(&cache->c_entry_count, 0);
70262 cache->c_bucket_bits = bucket_bits;
70263 #ifdef MB_CACHE_INDEXES_COUNT
70264diff --git a/fs/namei.c b/fs/namei.c
70265index b0afbd4..8d065a1 100644
70266--- a/fs/namei.c
70267+++ b/fs/namei.c
70268@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
70269 return ret;
70270
70271 /*
70272+ * Searching includes executable on directories, else just read.
70273+ */
70274+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70275+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70276+ if (capable(CAP_DAC_READ_SEARCH))
70277+ return 0;
70278+
70279+ /*
70280 * Read/write DACs are always overridable.
70281 * Executable DACs are overridable if at least one exec bit is set.
70282 */
70283@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
70284 if (capable(CAP_DAC_OVERRIDE))
70285 return 0;
70286
70287- /*
70288- * Searching includes executable on directories, else just read.
70289- */
70290- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70291- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70292- if (capable(CAP_DAC_READ_SEARCH))
70293- return 0;
70294-
70295 return -EACCES;
70296 }
70297
70298@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
70299 if (!ret)
70300 goto ok;
70301
70302- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
70303+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
70304+ capable(CAP_DAC_OVERRIDE))
70305 goto ok;
70306
70307 return ret;
70308@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
70309 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
70310 error = PTR_ERR(cookie);
70311 if (!IS_ERR(cookie)) {
70312- char *s = nd_get_link(nd);
70313+ const char *s = nd_get_link(nd);
70314 error = 0;
70315 if (s)
70316 error = __vfs_follow_link(nd, s);
70317@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
70318 err = security_inode_follow_link(path->dentry, nd);
70319 if (err)
70320 goto loop;
70321+
70322+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
70323+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
70324+ err = -EACCES;
70325+ goto loop;
70326+ }
70327+
70328 current->link_count++;
70329 current->total_link_count++;
70330 nd->depth++;
70331@@ -1016,11 +1024,19 @@ return_reval:
70332 break;
70333 }
70334 return_base:
70335+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
70336+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
70337+ path_put(&nd->path);
70338+ return -ENOENT;
70339+ }
70340 return 0;
70341 out_dput:
70342 path_put_conditional(&next, nd);
70343 break;
70344 }
70345+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
70346+ err = -ENOENT;
70347+
70348 path_put(&nd->path);
70349 return_err:
70350 return err;
70351@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
70352 int retval = path_init(dfd, name, flags, nd);
70353 if (!retval)
70354 retval = path_walk(name, nd);
70355- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
70356- nd->path.dentry->d_inode))
70357- audit_inode(name, nd->path.dentry);
70358+
70359+ if (likely(!retval)) {
70360+ if (nd->path.dentry && nd->path.dentry->d_inode) {
70361+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
70362+ retval = -ENOENT;
70363+ if (!audit_dummy_context())
70364+ audit_inode(name, nd->path.dentry);
70365+ }
70366+ }
70367 if (nd->root.mnt) {
70368 path_put(&nd->root);
70369 nd->root.mnt = NULL;
70370 }
70371+
70372 return retval;
70373 }
70374
70375@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
70376 if (error)
70377 goto err_out;
70378
70379+
70380+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
70381+ error = -EPERM;
70382+ goto err_out;
70383+ }
70384+ if (gr_handle_rawio(inode)) {
70385+ error = -EPERM;
70386+ goto err_out;
70387+ }
70388+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
70389+ error = -EACCES;
70390+ goto err_out;
70391+ }
70392+
70393 if (flag & O_TRUNC) {
70394 error = get_write_access(inode);
70395 if (error)
70396@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70397 {
70398 int error;
70399 struct dentry *dir = nd->path.dentry;
70400+ int acc_mode = ACC_MODE(flag);
70401+
70402+ if (flag & O_TRUNC)
70403+ acc_mode |= MAY_WRITE;
70404+ if (flag & O_APPEND)
70405+ acc_mode |= MAY_APPEND;
70406+
70407+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
70408+ error = -EACCES;
70409+ goto out_unlock;
70410+ }
70411
70412 if (!IS_POSIXACL(dir->d_inode))
70413 mode &= ~current_umask();
70414@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70415 if (error)
70416 goto out_unlock;
70417 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
70418+ if (!error)
70419+ gr_handle_create(path->dentry, nd->path.mnt);
70420 out_unlock:
70421 mutex_unlock(&dir->d_inode->i_mutex);
70422 dput(nd->path.dentry);
70423@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
70424 &nd, flag);
70425 if (error)
70426 return ERR_PTR(error);
70427+
70428+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
70429+ error = -EPERM;
70430+ goto exit;
70431+ }
70432+
70433+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
70434+ error = -EPERM;
70435+ goto exit;
70436+ }
70437+
70438+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
70439+ error = -EACCES;
70440+ goto exit;
70441+ }
70442+
70443 goto ok;
70444 }
70445
70446@@ -1795,6 +1861,19 @@ do_last:
70447 /*
70448 * It already exists.
70449 */
70450+
70451+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
70452+ error = -ENOENT;
70453+ goto exit_mutex_unlock;
70454+ }
70455+
70456+ /* only check if O_CREAT is specified, all other checks need
70457+ to go into may_open */
70458+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
70459+ error = -EACCES;
70460+ goto exit_mutex_unlock;
70461+ }
70462+
70463 mutex_unlock(&dir->d_inode->i_mutex);
70464 audit_inode(pathname, path.dentry);
70465
70466@@ -1887,6 +1966,13 @@ do_link:
70467 error = security_inode_follow_link(path.dentry, &nd);
70468 if (error)
70469 goto exit_dput;
70470+
70471+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
70472+ path.dentry, nd.path.mnt)) {
70473+ error = -EACCES;
70474+ goto exit_dput;
70475+ }
70476+
70477 error = __do_follow_link(&path, &nd);
70478 if (error) {
70479 /* Does someone understand code flow here? Or it is only
70480@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
70481 }
70482 return dentry;
70483 eexist:
70484+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
70485+ dput(dentry);
70486+ return ERR_PTR(-ENOENT);
70487+ }
70488 dput(dentry);
70489 dentry = ERR_PTR(-EEXIST);
70490 fail:
70491@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70492 error = may_mknod(mode);
70493 if (error)
70494 goto out_dput;
70495+
70496+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
70497+ error = -EPERM;
70498+ goto out_dput;
70499+ }
70500+
70501+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70502+ error = -EACCES;
70503+ goto out_dput;
70504+ }
70505+
70506 error = mnt_want_write(nd.path.mnt);
70507 if (error)
70508 goto out_dput;
70509@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70510 }
70511 out_drop_write:
70512 mnt_drop_write(nd.path.mnt);
70513+
70514+ if (!error)
70515+ gr_handle_create(dentry, nd.path.mnt);
70516 out_dput:
70517 dput(dentry);
70518 out_unlock:
70519@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70520 if (IS_ERR(dentry))
70521 goto out_unlock;
70522
70523+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
70524+ error = -EACCES;
70525+ goto out_dput;
70526+ }
70527+
70528 if (!IS_POSIXACL(nd.path.dentry->d_inode))
70529 mode &= ~current_umask();
70530 error = mnt_want_write(nd.path.mnt);
70531@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70532 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
70533 out_drop_write:
70534 mnt_drop_write(nd.path.mnt);
70535+
70536+ if (!error)
70537+ gr_handle_create(dentry, nd.path.mnt);
70538+
70539 out_dput:
70540 dput(dentry);
70541 out_unlock:
70542@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70543 char * name;
70544 struct dentry *dentry;
70545 struct nameidata nd;
70546+ ino_t saved_ino = 0;
70547+ dev_t saved_dev = 0;
70548
70549 error = user_path_parent(dfd, pathname, &nd, &name);
70550 if (error)
70551@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
70552 error = PTR_ERR(dentry);
70553 if (IS_ERR(dentry))
70554 goto exit2;
70555+
70556+ if (dentry->d_inode != NULL) {
70557+ saved_ino = dentry->d_inode->i_ino;
70558+ saved_dev = gr_get_dev_from_dentry(dentry);
70559+
70560+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
70561+ error = -EACCES;
70562+ goto exit3;
70563+ }
70564+ }
70565+
70566 error = mnt_want_write(nd.path.mnt);
70567 if (error)
70568 goto exit3;
70569@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70570 if (error)
70571 goto exit4;
70572 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
70573+ if (!error && (saved_dev || saved_ino))
70574+ gr_handle_delete(saved_ino, saved_dev);
70575 exit4:
70576 mnt_drop_write(nd.path.mnt);
70577 exit3:
70578@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70579 struct dentry *dentry;
70580 struct nameidata nd;
70581 struct inode *inode = NULL;
70582+ ino_t saved_ino = 0;
70583+ dev_t saved_dev = 0;
70584
70585 error = user_path_parent(dfd, pathname, &nd, &name);
70586 if (error)
70587@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70588 if (nd.last.name[nd.last.len])
70589 goto slashes;
70590 inode = dentry->d_inode;
70591- if (inode)
70592+ if (inode) {
70593+ if (inode->i_nlink <= 1) {
70594+ saved_ino = inode->i_ino;
70595+ saved_dev = gr_get_dev_from_dentry(dentry);
70596+ }
70597+
70598 atomic_inc(&inode->i_count);
70599+
70600+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
70601+ error = -EACCES;
70602+ goto exit2;
70603+ }
70604+ }
70605 error = mnt_want_write(nd.path.mnt);
70606 if (error)
70607 goto exit2;
70608@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70609 if (error)
70610 goto exit3;
70611 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
70612+ if (!error && (saved_ino || saved_dev))
70613+ gr_handle_delete(saved_ino, saved_dev);
70614 exit3:
70615 mnt_drop_write(nd.path.mnt);
70616 exit2:
70617@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70618 if (IS_ERR(dentry))
70619 goto out_unlock;
70620
70621+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
70622+ error = -EACCES;
70623+ goto out_dput;
70624+ }
70625+
70626 error = mnt_want_write(nd.path.mnt);
70627 if (error)
70628 goto out_dput;
70629@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70630 if (error)
70631 goto out_drop_write;
70632 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
70633+ if (!error)
70634+ gr_handle_create(dentry, nd.path.mnt);
70635 out_drop_write:
70636 mnt_drop_write(nd.path.mnt);
70637 out_dput:
70638@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70639 error = PTR_ERR(new_dentry);
70640 if (IS_ERR(new_dentry))
70641 goto out_unlock;
70642+
70643+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
70644+ old_path.dentry->d_inode,
70645+ old_path.dentry->d_inode->i_mode, to)) {
70646+ error = -EACCES;
70647+ goto out_dput;
70648+ }
70649+
70650+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
70651+ old_path.dentry, old_path.mnt, to)) {
70652+ error = -EACCES;
70653+ goto out_dput;
70654+ }
70655+
70656 error = mnt_want_write(nd.path.mnt);
70657 if (error)
70658 goto out_dput;
70659@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70660 if (error)
70661 goto out_drop_write;
70662 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
70663+ if (!error)
70664+ gr_handle_create(new_dentry, nd.path.mnt);
70665 out_drop_write:
70666 mnt_drop_write(nd.path.mnt);
70667 out_dput:
70668@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70669 char *to;
70670 int error;
70671
70672+ pax_track_stack();
70673+
70674 error = user_path_parent(olddfd, oldname, &oldnd, &from);
70675 if (error)
70676 goto exit;
70677@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70678 if (new_dentry == trap)
70679 goto exit5;
70680
70681+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
70682+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
70683+ to);
70684+ if (error)
70685+ goto exit5;
70686+
70687 error = mnt_want_write(oldnd.path.mnt);
70688 if (error)
70689 goto exit5;
70690@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70691 goto exit6;
70692 error = vfs_rename(old_dir->d_inode, old_dentry,
70693 new_dir->d_inode, new_dentry);
70694+ if (!error)
70695+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
70696+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
70697 exit6:
70698 mnt_drop_write(oldnd.path.mnt);
70699 exit5:
70700@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
70701
70702 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
70703 {
70704+ char tmpbuf[64];
70705+ const char *newlink;
70706 int len;
70707
70708 len = PTR_ERR(link);
70709@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
70710 len = strlen(link);
70711 if (len > (unsigned) buflen)
70712 len = buflen;
70713- if (copy_to_user(buffer, link, len))
70714+
70715+ if (len < sizeof(tmpbuf)) {
70716+ memcpy(tmpbuf, link, len);
70717+ newlink = tmpbuf;
70718+ } else
70719+ newlink = link;
70720+
70721+ if (copy_to_user(buffer, newlink, len))
70722 len = -EFAULT;
70723 out:
70724 return len;
70725diff --git a/fs/namespace.c b/fs/namespace.c
70726index 2beb0fb..11a95a5 100644
70727--- a/fs/namespace.c
70728+++ b/fs/namespace.c
70729@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70730 if (!(sb->s_flags & MS_RDONLY))
70731 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
70732 up_write(&sb->s_umount);
70733+
70734+ gr_log_remount(mnt->mnt_devname, retval);
70735+
70736 return retval;
70737 }
70738
70739@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70740 security_sb_umount_busy(mnt);
70741 up_write(&namespace_sem);
70742 release_mounts(&umount_list);
70743+
70744+ gr_log_unmount(mnt->mnt_devname, retval);
70745+
70746 return retval;
70747 }
70748
70749@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70750 if (retval)
70751 goto dput_out;
70752
70753+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
70754+ retval = -EPERM;
70755+ goto dput_out;
70756+ }
70757+
70758+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
70759+ retval = -EPERM;
70760+ goto dput_out;
70761+ }
70762+
70763 if (flags & MS_REMOUNT)
70764 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
70765 data_page);
70766@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70767 dev_name, data_page);
70768 dput_out:
70769 path_put(&path);
70770+
70771+ gr_log_mount(dev_name, dir_name, retval);
70772+
70773 return retval;
70774 }
70775
70776@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
70777 goto out1;
70778 }
70779
70780+ if (gr_handle_chroot_pivot()) {
70781+ error = -EPERM;
70782+ path_put(&old);
70783+ goto out1;
70784+ }
70785+
70786 read_lock(&current->fs->lock);
70787 root = current->fs->root;
70788 path_get(&current->fs->root);
70789diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
70790index b8b5b30..2bd9ccb 100644
70791--- a/fs/ncpfs/dir.c
70792+++ b/fs/ncpfs/dir.c
70793@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
70794 int res, val = 0, len;
70795 __u8 __name[NCP_MAXPATHLEN + 1];
70796
70797+ pax_track_stack();
70798+
70799 parent = dget_parent(dentry);
70800 dir = parent->d_inode;
70801
70802@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
70803 int error, res, len;
70804 __u8 __name[NCP_MAXPATHLEN + 1];
70805
70806+ pax_track_stack();
70807+
70808 lock_kernel();
70809 error = -EIO;
70810 if (!ncp_conn_valid(server))
70811@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
70812 int error, result, len;
70813 int opmode;
70814 __u8 __name[NCP_MAXPATHLEN + 1];
70815-
70816+
70817 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
70818 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
70819
70820+ pax_track_stack();
70821+
70822 error = -EIO;
70823 lock_kernel();
70824 if (!ncp_conn_valid(server))
70825@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70826 int error, len;
70827 __u8 __name[NCP_MAXPATHLEN + 1];
70828
70829+ pax_track_stack();
70830+
70831 DPRINTK("ncp_mkdir: making %s/%s\n",
70832 dentry->d_parent->d_name.name, dentry->d_name.name);
70833
70834@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70835 if (!ncp_conn_valid(server))
70836 goto out;
70837
70838+ pax_track_stack();
70839+
70840 ncp_age_dentry(server, dentry);
70841 len = sizeof(__name);
70842 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
70843@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
70844 int old_len, new_len;
70845 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
70846
70847+ pax_track_stack();
70848+
70849 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
70850 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
70851 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
70852diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
70853index cf98da1..da890a9 100644
70854--- a/fs/ncpfs/inode.c
70855+++ b/fs/ncpfs/inode.c
70856@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
70857 #endif
70858 struct ncp_entry_info finfo;
70859
70860+ pax_track_stack();
70861+
70862 data.wdog_pid = NULL;
70863 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
70864 if (!server)
70865diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
70866index 2441d1a..96882c1 100644
70867--- a/fs/ncpfs/ncplib_kernel.h
70868+++ b/fs/ncpfs/ncplib_kernel.h
70869@@ -131,7 +131,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
70870 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
70871 const unsigned char *, unsigned int, int);
70872 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70873- const unsigned char *, unsigned int, int);
70874+ const unsigned char *, unsigned int, int) __size_overflow(5);
70875
70876 #define NCP_ESC ':'
70877 #define NCP_IO_TABLE(dentry) (NCP_SERVER((dentry)->d_inode)->nls_io)
70878@@ -147,7 +147,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70879 int ncp__io2vol(unsigned char *, unsigned int *,
70880 const unsigned char *, unsigned int, int);
70881 int ncp__vol2io(unsigned char *, unsigned int *,
70882- const unsigned char *, unsigned int, int);
70883+ const unsigned char *, unsigned int, int) __size_overflow(5);
70884
70885 #define NCP_IO_TABLE(dentry) NULL
70886 #define ncp_tolower(t, c) tolower(c)
70887diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
70888index bfaef7b..e9d03ca 100644
70889--- a/fs/nfs/inode.c
70890+++ b/fs/nfs/inode.c
70891@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
70892 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
70893 nfsi->attrtimeo_timestamp = jiffies;
70894
70895- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
70896+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
70897 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
70898 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
70899 else
70900@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
70901 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
70902 }
70903
70904-static atomic_long_t nfs_attr_generation_counter;
70905+static atomic_long_unchecked_t nfs_attr_generation_counter;
70906
70907 static unsigned long nfs_read_attr_generation_counter(void)
70908 {
70909- return atomic_long_read(&nfs_attr_generation_counter);
70910+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
70911 }
70912
70913 unsigned long nfs_inc_attr_generation_counter(void)
70914 {
70915- return atomic_long_inc_return(&nfs_attr_generation_counter);
70916+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
70917 }
70918
70919 void nfs_fattr_init(struct nfs_fattr *fattr)
70920diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
70921index cc2f505..f6a236f 100644
70922--- a/fs/nfsd/lockd.c
70923+++ b/fs/nfsd/lockd.c
70924@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
70925 fput(filp);
70926 }
70927
70928-static struct nlmsvc_binding nfsd_nlm_ops = {
70929+static const struct nlmsvc_binding nfsd_nlm_ops = {
70930 .fopen = nlm_fopen, /* open file for locking */
70931 .fclose = nlm_fclose, /* close file */
70932 };
70933diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
70934index cfc3391..dcc083a 100644
70935--- a/fs/nfsd/nfs4state.c
70936+++ b/fs/nfsd/nfs4state.c
70937@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
70938 unsigned int cmd;
70939 int err;
70940
70941+ pax_track_stack();
70942+
70943 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
70944 (long long) lock->lk_offset,
70945 (long long) lock->lk_length);
70946diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
70947index 4a82a96..0d5fb49 100644
70948--- a/fs/nfsd/nfs4xdr.c
70949+++ b/fs/nfsd/nfs4xdr.c
70950@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
70951 struct nfsd4_compoundres *resp = rqstp->rq_resp;
70952 u32 minorversion = resp->cstate.minorversion;
70953
70954+ pax_track_stack();
70955+
70956 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
70957 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
70958 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
70959diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
70960index 2e09588..596421d 100644
70961--- a/fs/nfsd/vfs.c
70962+++ b/fs/nfsd/vfs.c
70963@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70964 } else {
70965 oldfs = get_fs();
70966 set_fs(KERNEL_DS);
70967- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
70968+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
70969 set_fs(oldfs);
70970 }
70971
70972@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70973
70974 /* Write the data. */
70975 oldfs = get_fs(); set_fs(KERNEL_DS);
70976- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
70977+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
70978 set_fs(oldfs);
70979 if (host_err < 0)
70980 goto out_nfserr;
70981@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
70982 */
70983
70984 oldfs = get_fs(); set_fs(KERNEL_DS);
70985- host_err = inode->i_op->readlink(dentry, buf, *lenp);
70986+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
70987 set_fs(oldfs);
70988
70989 if (host_err < 0)
70990diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
70991index f6af760..d0adf34 100644
70992--- a/fs/nilfs2/ioctl.c
70993+++ b/fs/nilfs2/ioctl.c
70994@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70995 unsigned int cmd, void __user *argp)
70996 {
70997 struct nilfs_argv argv[5];
70998- const static size_t argsz[5] = {
70999+ static const size_t argsz[5] = {
71000 sizeof(struct nilfs_vdesc),
71001 sizeof(struct nilfs_period),
71002 sizeof(__u64),
71003@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
71004 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
71005 goto out_free;
71006
71007+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
71008+ goto out_free;
71009+
71010 len = argv[n].v_size * argv[n].v_nmembs;
71011 base = (void __user *)(unsigned long)argv[n].v_base;
71012 if (len == 0) {
71013diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
71014index 7e54e52..9337248 100644
71015--- a/fs/notify/dnotify/dnotify.c
71016+++ b/fs/notify/dnotify/dnotify.c
71017@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
71018 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
71019 }
71020
71021-static struct fsnotify_ops dnotify_fsnotify_ops = {
71022+static const struct fsnotify_ops dnotify_fsnotify_ops = {
71023 .handle_event = dnotify_handle_event,
71024 .should_send_event = dnotify_should_send_event,
71025 .free_group_priv = NULL,
71026diff --git a/fs/notify/notification.c b/fs/notify/notification.c
71027index b8bf53b..c518688 100644
71028--- a/fs/notify/notification.c
71029+++ b/fs/notify/notification.c
71030@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
71031 * get set to 0 so it will never get 'freed'
71032 */
71033 static struct fsnotify_event q_overflow_event;
71034-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
71035+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
71036
71037 /**
71038 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
71039@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
71040 */
71041 u32 fsnotify_get_cookie(void)
71042 {
71043- return atomic_inc_return(&fsnotify_sync_cookie);
71044+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
71045 }
71046 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
71047
71048diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
71049index 5a9e344..0f8cd28 100644
71050--- a/fs/ntfs/dir.c
71051+++ b/fs/ntfs/dir.c
71052@@ -1328,7 +1328,7 @@ find_next_index_buffer:
71053 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
71054 ~(s64)(ndir->itype.index.block_size - 1)));
71055 /* Bounds checks. */
71056- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
71057+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
71058 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
71059 "inode 0x%lx or driver bug.", vdir->i_ino);
71060 goto err_out;
71061diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
71062index 663c0e3..b6868e9 100644
71063--- a/fs/ntfs/file.c
71064+++ b/fs/ntfs/file.c
71065@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
71066 #endif /* NTFS_RW */
71067 };
71068
71069-const struct file_operations ntfs_empty_file_ops = {};
71070+const struct file_operations ntfs_empty_file_ops __read_only;
71071
71072-const struct inode_operations ntfs_empty_inode_ops = {};
71073+const struct inode_operations ntfs_empty_inode_ops __read_only;
71074diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
71075index 1cd2934..880b5d2 100644
71076--- a/fs/ocfs2/cluster/masklog.c
71077+++ b/fs/ocfs2/cluster/masklog.c
71078@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
71079 return mlog_mask_store(mlog_attr->mask, buf, count);
71080 }
71081
71082-static struct sysfs_ops mlog_attr_ops = {
71083+static const struct sysfs_ops mlog_attr_ops = {
71084 .show = mlog_show,
71085 .store = mlog_store,
71086 };
71087diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
71088index ac10f83..2cd2607 100644
71089--- a/fs/ocfs2/localalloc.c
71090+++ b/fs/ocfs2/localalloc.c
71091@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
71092 goto bail;
71093 }
71094
71095- atomic_inc(&osb->alloc_stats.moves);
71096+ atomic_inc_unchecked(&osb->alloc_stats.moves);
71097
71098 status = 0;
71099 bail:
71100diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
71101index f010b22..9f9ed34 100644
71102--- a/fs/ocfs2/namei.c
71103+++ b/fs/ocfs2/namei.c
71104@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
71105 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
71106 struct ocfs2_dir_lookup_result target_insert = { NULL, };
71107
71108+ pax_track_stack();
71109+
71110 /* At some point it might be nice to break this function up a
71111 * bit. */
71112
71113diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
71114index d963d86..914cfbd 100644
71115--- a/fs/ocfs2/ocfs2.h
71116+++ b/fs/ocfs2/ocfs2.h
71117@@ -217,11 +217,11 @@ enum ocfs2_vol_state
71118
71119 struct ocfs2_alloc_stats
71120 {
71121- atomic_t moves;
71122- atomic_t local_data;
71123- atomic_t bitmap_data;
71124- atomic_t bg_allocs;
71125- atomic_t bg_extends;
71126+ atomic_unchecked_t moves;
71127+ atomic_unchecked_t local_data;
71128+ atomic_unchecked_t bitmap_data;
71129+ atomic_unchecked_t bg_allocs;
71130+ atomic_unchecked_t bg_extends;
71131 };
71132
71133 enum ocfs2_local_alloc_state
71134diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
71135index 79b5dac..d322952 100644
71136--- a/fs/ocfs2/suballoc.c
71137+++ b/fs/ocfs2/suballoc.c
71138@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
71139 mlog_errno(status);
71140 goto bail;
71141 }
71142- atomic_inc(&osb->alloc_stats.bg_extends);
71143+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
71144
71145 /* You should never ask for this much metadata */
71146 BUG_ON(bits_wanted >
71147@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
71148 mlog_errno(status);
71149 goto bail;
71150 }
71151- atomic_inc(&osb->alloc_stats.bg_allocs);
71152+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
71153
71154 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
71155 ac->ac_bits_given += (*num_bits);
71156@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
71157 mlog_errno(status);
71158 goto bail;
71159 }
71160- atomic_inc(&osb->alloc_stats.bg_allocs);
71161+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
71162
71163 BUG_ON(num_bits != 1);
71164
71165@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71166 cluster_start,
71167 num_clusters);
71168 if (!status)
71169- atomic_inc(&osb->alloc_stats.local_data);
71170+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
71171 } else {
71172 if (min_clusters > (osb->bitmap_cpg - 1)) {
71173 /* The only paths asking for contiguousness
71174@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71175 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
71176 bg_blkno,
71177 bg_bit_off);
71178- atomic_inc(&osb->alloc_stats.bitmap_data);
71179+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
71180 }
71181 }
71182 if (status < 0) {
71183diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
71184index 9f55be4..a3f8048 100644
71185--- a/fs/ocfs2/super.c
71186+++ b/fs/ocfs2/super.c
71187@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
71188 "%10s => GlobalAllocs: %d LocalAllocs: %d "
71189 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
71190 "Stats",
71191- atomic_read(&osb->alloc_stats.bitmap_data),
71192- atomic_read(&osb->alloc_stats.local_data),
71193- atomic_read(&osb->alloc_stats.bg_allocs),
71194- atomic_read(&osb->alloc_stats.moves),
71195- atomic_read(&osb->alloc_stats.bg_extends));
71196+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
71197+ atomic_read_unchecked(&osb->alloc_stats.local_data),
71198+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
71199+ atomic_read_unchecked(&osb->alloc_stats.moves),
71200+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
71201
71202 out += snprintf(buf + out, len - out,
71203 "%10s => State: %u Descriptor: %llu Size: %u bits "
71204@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
71205 spin_lock_init(&osb->osb_xattr_lock);
71206 ocfs2_init_inode_steal_slot(osb);
71207
71208- atomic_set(&osb->alloc_stats.moves, 0);
71209- atomic_set(&osb->alloc_stats.local_data, 0);
71210- atomic_set(&osb->alloc_stats.bitmap_data, 0);
71211- atomic_set(&osb->alloc_stats.bg_allocs, 0);
71212- atomic_set(&osb->alloc_stats.bg_extends, 0);
71213+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
71214+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
71215+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
71216+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
71217+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
71218
71219 /* Copy the blockcheck stats from the superblock probe */
71220 osb->osb_ecc_stats = *stats;
71221diff --git a/fs/open.c b/fs/open.c
71222index 4f01e06..2a8057a 100644
71223--- a/fs/open.c
71224+++ b/fs/open.c
71225@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
71226 error = locks_verify_truncate(inode, NULL, length);
71227 if (!error)
71228 error = security_path_truncate(&path, length, 0);
71229+
71230+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
71231+ error = -EACCES;
71232+
71233 if (!error) {
71234 vfs_dq_init(inode);
71235 error = do_truncate(path.dentry, length, 0, NULL);
71236@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
71237 if (__mnt_is_readonly(path.mnt))
71238 res = -EROFS;
71239
71240+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
71241+ res = -EACCES;
71242+
71243 out_path_release:
71244 path_put(&path);
71245 out:
71246@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
71247 if (error)
71248 goto dput_and_out;
71249
71250+ gr_log_chdir(path.dentry, path.mnt);
71251+
71252 set_fs_pwd(current->fs, &path);
71253
71254 dput_and_out:
71255@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
71256 goto out_putf;
71257
71258 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
71259+
71260+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
71261+ error = -EPERM;
71262+
71263+ if (!error)
71264+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
71265+
71266 if (!error)
71267 set_fs_pwd(current->fs, &file->f_path);
71268 out_putf:
71269@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
71270 if (!capable(CAP_SYS_CHROOT))
71271 goto dput_and_out;
71272
71273+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
71274+ goto dput_and_out;
71275+
71276 set_fs_root(current->fs, &path);
71277+
71278+ gr_handle_chroot_chdir(&path);
71279+
71280 error = 0;
71281 dput_and_out:
71282 path_put(&path);
71283@@ -596,66 +618,57 @@ out:
71284 return error;
71285 }
71286
71287+static int chmod_common(struct path *path, umode_t mode)
71288+{
71289+ struct inode *inode = path->dentry->d_inode;
71290+ struct iattr newattrs;
71291+ int error;
71292+
71293+ error = mnt_want_write(path->mnt);
71294+ if (error)
71295+ return error;
71296+ mutex_lock(&inode->i_mutex);
71297+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
71298+ error = -EACCES;
71299+ goto out_unlock;
71300+ }
71301+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
71302+ error = -EPERM;
71303+ goto out_unlock;
71304+ }
71305+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71306+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71307+ error = notify_change(path->dentry, &newattrs);
71308+out_unlock:
71309+ mutex_unlock(&inode->i_mutex);
71310+ mnt_drop_write(path->mnt);
71311+ return error;
71312+}
71313+
71314 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
71315 {
71316- struct inode * inode;
71317- struct dentry * dentry;
71318 struct file * file;
71319 int err = -EBADF;
71320- struct iattr newattrs;
71321
71322 file = fget(fd);
71323- if (!file)
71324- goto out;
71325-
71326- dentry = file->f_path.dentry;
71327- inode = dentry->d_inode;
71328-
71329- audit_inode(NULL, dentry);
71330-
71331- err = mnt_want_write_file(file);
71332- if (err)
71333- goto out_putf;
71334- mutex_lock(&inode->i_mutex);
71335- if (mode == (mode_t) -1)
71336- mode = inode->i_mode;
71337- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71338- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71339- err = notify_change(dentry, &newattrs);
71340- mutex_unlock(&inode->i_mutex);
71341- mnt_drop_write(file->f_path.mnt);
71342-out_putf:
71343- fput(file);
71344-out:
71345+ if (file) {
71346+ audit_inode(NULL, file->f_path.dentry);
71347+ err = chmod_common(&file->f_path, mode);
71348+ fput(file);
71349+ }
71350 return err;
71351 }
71352
71353 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
71354 {
71355 struct path path;
71356- struct inode *inode;
71357 int error;
71358- struct iattr newattrs;
71359
71360 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
71361- if (error)
71362- goto out;
71363- inode = path.dentry->d_inode;
71364-
71365- error = mnt_want_write(path.mnt);
71366- if (error)
71367- goto dput_and_out;
71368- mutex_lock(&inode->i_mutex);
71369- if (mode == (mode_t) -1)
71370- mode = inode->i_mode;
71371- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71372- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71373- error = notify_change(path.dentry, &newattrs);
71374- mutex_unlock(&inode->i_mutex);
71375- mnt_drop_write(path.mnt);
71376-dput_and_out:
71377- path_put(&path);
71378-out:
71379+ if (!error) {
71380+ error = chmod_common(&path, mode);
71381+ path_put(&path);
71382+ }
71383 return error;
71384 }
71385
71386@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
71387 return sys_fchmodat(AT_FDCWD, filename, mode);
71388 }
71389
71390-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
71391+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
71392 {
71393 struct inode *inode = dentry->d_inode;
71394 int error;
71395 struct iattr newattrs;
71396
71397+ if (!gr_acl_handle_chown(dentry, mnt))
71398+ return -EACCES;
71399+
71400 newattrs.ia_valid = ATTR_CTIME;
71401 if (user != (uid_t) -1) {
71402 newattrs.ia_valid |= ATTR_UID;
71403@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
71404 error = mnt_want_write(path.mnt);
71405 if (error)
71406 goto out_release;
71407- error = chown_common(path.dentry, user, group);
71408+ error = chown_common(path.dentry, user, group, path.mnt);
71409 mnt_drop_write(path.mnt);
71410 out_release:
71411 path_put(&path);
71412@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
71413 error = mnt_want_write(path.mnt);
71414 if (error)
71415 goto out_release;
71416- error = chown_common(path.dentry, user, group);
71417+ error = chown_common(path.dentry, user, group, path.mnt);
71418 mnt_drop_write(path.mnt);
71419 out_release:
71420 path_put(&path);
71421@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
71422 error = mnt_want_write(path.mnt);
71423 if (error)
71424 goto out_release;
71425- error = chown_common(path.dentry, user, group);
71426+ error = chown_common(path.dentry, user, group, path.mnt);
71427 mnt_drop_write(path.mnt);
71428 out_release:
71429 path_put(&path);
71430@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
71431 goto out_fput;
71432 dentry = file->f_path.dentry;
71433 audit_inode(NULL, dentry);
71434- error = chown_common(dentry, user, group);
71435+ error = chown_common(dentry, user, group, file->f_path.mnt);
71436 mnt_drop_write(file->f_path.mnt);
71437 out_fput:
71438 fput(file);
71439@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
71440 if (!IS_ERR(tmp)) {
71441 fd = get_unused_fd_flags(flags);
71442 if (fd >= 0) {
71443- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71444+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71445 if (IS_ERR(f)) {
71446 put_unused_fd(fd);
71447 fd = PTR_ERR(f);
71448diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
71449index 6ab70f4..f4103d1 100644
71450--- a/fs/partitions/efi.c
71451+++ b/fs/partitions/efi.c
71452@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
71453 if (!bdev || !gpt)
71454 return NULL;
71455
71456+ if (!le32_to_cpu(gpt->num_partition_entries))
71457+ return NULL;
71458+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
71459+ if (!pte)
71460+ return NULL;
71461+
71462 count = le32_to_cpu(gpt->num_partition_entries) *
71463 le32_to_cpu(gpt->sizeof_partition_entry);
71464- if (!count)
71465- return NULL;
71466- pte = kzalloc(count, GFP_KERNEL);
71467- if (!pte)
71468- return NULL;
71469-
71470 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
71471 (u8 *) pte,
71472 count) < count) {
71473diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
71474index dd6efdb..3babc6c 100644
71475--- a/fs/partitions/ldm.c
71476+++ b/fs/partitions/ldm.c
71477@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71478 ldm_error ("A VBLK claims to have %d parts.", num);
71479 return false;
71480 }
71481+
71482 if (rec >= num) {
71483 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
71484 return false;
71485@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71486 goto found;
71487 }
71488
71489- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
71490+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
71491 if (!f) {
71492 ldm_crit ("Out of memory.");
71493 return false;
71494diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
71495index 5765198..7f8e9e0 100644
71496--- a/fs/partitions/mac.c
71497+++ b/fs/partitions/mac.c
71498@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
71499 return 0; /* not a MacOS disk */
71500 }
71501 blocks_in_map = be32_to_cpu(part->map_count);
71502- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71503- put_dev_sector(sect);
71504- return 0;
71505- }
71506 printk(" [mac]");
71507+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71508+ put_dev_sector(sect);
71509+ return 0;
71510+ }
71511 for (slot = 1; slot <= blocks_in_map; ++slot) {
71512 int pos = slot * secsize;
71513 put_dev_sector(sect);
71514diff --git a/fs/pipe.c b/fs/pipe.c
71515index d0cc080..8a6f211 100644
71516--- a/fs/pipe.c
71517+++ b/fs/pipe.c
71518@@ -401,9 +401,9 @@ redo:
71519 }
71520 if (bufs) /* More to do? */
71521 continue;
71522- if (!pipe->writers)
71523+ if (!atomic_read(&pipe->writers))
71524 break;
71525- if (!pipe->waiting_writers) {
71526+ if (!atomic_read(&pipe->waiting_writers)) {
71527 /* syscall merging: Usually we must not sleep
71528 * if O_NONBLOCK is set, or if we got some data.
71529 * But if a writer sleeps in kernel space, then
71530@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
71531 mutex_lock(&inode->i_mutex);
71532 pipe = inode->i_pipe;
71533
71534- if (!pipe->readers) {
71535+ if (!atomic_read(&pipe->readers)) {
71536 send_sig(SIGPIPE, current, 0);
71537 ret = -EPIPE;
71538 goto out;
71539@@ -511,7 +511,7 @@ redo1:
71540 for (;;) {
71541 int bufs;
71542
71543- if (!pipe->readers) {
71544+ if (!atomic_read(&pipe->readers)) {
71545 send_sig(SIGPIPE, current, 0);
71546 if (!ret)
71547 ret = -EPIPE;
71548@@ -597,9 +597,9 @@ redo2:
71549 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
71550 do_wakeup = 0;
71551 }
71552- pipe->waiting_writers++;
71553+ atomic_inc(&pipe->waiting_writers);
71554 pipe_wait(pipe);
71555- pipe->waiting_writers--;
71556+ atomic_dec(&pipe->waiting_writers);
71557 }
71558 out:
71559 mutex_unlock(&inode->i_mutex);
71560@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71561 mask = 0;
71562 if (filp->f_mode & FMODE_READ) {
71563 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
71564- if (!pipe->writers && filp->f_version != pipe->w_counter)
71565+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
71566 mask |= POLLHUP;
71567 }
71568
71569@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71570 * Most Unices do not set POLLERR for FIFOs but on Linux they
71571 * behave exactly like pipes for poll().
71572 */
71573- if (!pipe->readers)
71574+ if (!atomic_read(&pipe->readers))
71575 mask |= POLLERR;
71576 }
71577
71578@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
71579
71580 mutex_lock(&inode->i_mutex);
71581 pipe = inode->i_pipe;
71582- pipe->readers -= decr;
71583- pipe->writers -= decw;
71584+ atomic_sub(decr, &pipe->readers);
71585+ atomic_sub(decw, &pipe->writers);
71586
71587- if (!pipe->readers && !pipe->writers) {
71588+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
71589 free_pipe_info(inode);
71590 } else {
71591 wake_up_interruptible_sync(&pipe->wait);
71592@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
71593
71594 if (inode->i_pipe) {
71595 ret = 0;
71596- inode->i_pipe->readers++;
71597+ atomic_inc(&inode->i_pipe->readers);
71598 }
71599
71600 mutex_unlock(&inode->i_mutex);
71601@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
71602
71603 if (inode->i_pipe) {
71604 ret = 0;
71605- inode->i_pipe->writers++;
71606+ atomic_inc(&inode->i_pipe->writers);
71607 }
71608
71609 mutex_unlock(&inode->i_mutex);
71610@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
71611 if (inode->i_pipe) {
71612 ret = 0;
71613 if (filp->f_mode & FMODE_READ)
71614- inode->i_pipe->readers++;
71615+ atomic_inc(&inode->i_pipe->readers);
71616 if (filp->f_mode & FMODE_WRITE)
71617- inode->i_pipe->writers++;
71618+ atomic_inc(&inode->i_pipe->writers);
71619 }
71620
71621 mutex_unlock(&inode->i_mutex);
71622@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
71623 inode->i_pipe = NULL;
71624 }
71625
71626-static struct vfsmount *pipe_mnt __read_mostly;
71627+struct vfsmount *pipe_mnt __read_mostly;
71628 static int pipefs_delete_dentry(struct dentry *dentry)
71629 {
71630 /*
71631@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
71632 goto fail_iput;
71633 inode->i_pipe = pipe;
71634
71635- pipe->readers = pipe->writers = 1;
71636+ atomic_set(&pipe->readers, 1);
71637+ atomic_set(&pipe->writers, 1);
71638 inode->i_fop = &rdwr_pipefifo_fops;
71639
71640 /*
71641diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
71642index 50f8f06..c5755df 100644
71643--- a/fs/proc/Kconfig
71644+++ b/fs/proc/Kconfig
71645@@ -30,12 +30,12 @@ config PROC_FS
71646
71647 config PROC_KCORE
71648 bool "/proc/kcore support" if !ARM
71649- depends on PROC_FS && MMU
71650+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
71651
71652 config PROC_VMCORE
71653 bool "/proc/vmcore support (EXPERIMENTAL)"
71654- depends on PROC_FS && CRASH_DUMP
71655- default y
71656+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
71657+ default n
71658 help
71659 Exports the dump image of crashed kernel in ELF format.
71660
71661@@ -59,8 +59,8 @@ config PROC_SYSCTL
71662 limited in memory.
71663
71664 config PROC_PAGE_MONITOR
71665- default y
71666- depends on PROC_FS && MMU
71667+ default n
71668+ depends on PROC_FS && MMU && !GRKERNSEC
71669 bool "Enable /proc page monitoring" if EMBEDDED
71670 help
71671 Various /proc files exist to monitor process memory utilization:
71672diff --git a/fs/proc/array.c b/fs/proc/array.c
71673index c5ef152..28c94f7 100644
71674--- a/fs/proc/array.c
71675+++ b/fs/proc/array.c
71676@@ -60,6 +60,7 @@
71677 #include <linux/tty.h>
71678 #include <linux/string.h>
71679 #include <linux/mman.h>
71680+#include <linux/grsecurity.h>
71681 #include <linux/proc_fs.h>
71682 #include <linux/ioport.h>
71683 #include <linux/uaccess.h>
71684@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
71685 p->nivcsw);
71686 }
71687
71688+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71689+static inline void task_pax(struct seq_file *m, struct task_struct *p)
71690+{
71691+ if (p->mm)
71692+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
71693+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
71694+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
71695+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
71696+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
71697+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
71698+ else
71699+ seq_printf(m, "PaX:\t-----\n");
71700+}
71701+#endif
71702+
71703 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71704 struct pid *pid, struct task_struct *task)
71705 {
71706@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71707 task_cap(m, task);
71708 cpuset_task_status_allowed(m, task);
71709 task_context_switch_counts(m, task);
71710+
71711+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71712+ task_pax(m, task);
71713+#endif
71714+
71715+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
71716+ task_grsec_rbac(m, task);
71717+#endif
71718+
71719 return 0;
71720 }
71721
71722+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71723+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71724+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71725+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71726+#endif
71727+
71728 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71729 struct pid *pid, struct task_struct *task, int whole)
71730 {
71731@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71732 cputime_t cutime, cstime, utime, stime;
71733 cputime_t cgtime, gtime;
71734 unsigned long rsslim = 0;
71735- char tcomm[sizeof(task->comm)];
71736+ char tcomm[sizeof(task->comm)] = { 0 };
71737 unsigned long flags;
71738
71739+ pax_track_stack();
71740+
71741+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71742+ if (current->exec_id != m->exec_id) {
71743+ gr_log_badprocpid("stat");
71744+ return 0;
71745+ }
71746+#endif
71747+
71748 state = *get_task_state(task);
71749 vsize = eip = esp = 0;
71750 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
71751@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71752 gtime = task_gtime(task);
71753 }
71754
71755+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71756+ if (PAX_RAND_FLAGS(mm)) {
71757+ eip = 0;
71758+ esp = 0;
71759+ wchan = 0;
71760+ }
71761+#endif
71762+#ifdef CONFIG_GRKERNSEC_HIDESYM
71763+ wchan = 0;
71764+ eip =0;
71765+ esp =0;
71766+#endif
71767+
71768 /* scale priority and nice values from timeslices to -20..20 */
71769 /* to make it look like a "normal" Unix priority/nice value */
71770 priority = task_prio(task);
71771@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71772 vsize,
71773 mm ? get_mm_rss(mm) : 0,
71774 rsslim,
71775+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71776+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
71777+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
71778+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
71779+#else
71780 mm ? (permitted ? mm->start_code : 1) : 0,
71781 mm ? (permitted ? mm->end_code : 1) : 0,
71782 (permitted && mm) ? mm->start_stack : 0,
71783+#endif
71784 esp,
71785 eip,
71786 /* The signal information here is obsolete.
71787@@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71788 struct pid *pid, struct task_struct *task)
71789 {
71790 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
71791- struct mm_struct *mm = get_task_mm(task);
71792+ struct mm_struct *mm;
71793
71794+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71795+ if (current->exec_id != m->exec_id) {
71796+ gr_log_badprocpid("statm");
71797+ return 0;
71798+ }
71799+#endif
71800+
71801+ mm = get_task_mm(task);
71802 if (mm) {
71803 size = task_statm(mm, &shared, &text, &data, &resident);
71804 mmput(mm);
71805@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71806
71807 return 0;
71808 }
71809+
71810+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71811+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
71812+{
71813+ u32 curr_ip = 0;
71814+ unsigned long flags;
71815+
71816+ if (lock_task_sighand(task, &flags)) {
71817+ curr_ip = task->signal->curr_ip;
71818+ unlock_task_sighand(task, &flags);
71819+ }
71820+
71821+ return sprintf(buffer, "%pI4\n", &curr_ip);
71822+}
71823+#endif
71824diff --git a/fs/proc/base.c b/fs/proc/base.c
71825index 67f7dc0..a86ad9a 100644
71826--- a/fs/proc/base.c
71827+++ b/fs/proc/base.c
71828@@ -102,6 +102,22 @@ struct pid_entry {
71829 union proc_op op;
71830 };
71831
71832+struct getdents_callback {
71833+ struct linux_dirent __user * current_dir;
71834+ struct linux_dirent __user * previous;
71835+ struct file * file;
71836+ int count;
71837+ int error;
71838+};
71839+
71840+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
71841+ loff_t offset, u64 ino, unsigned int d_type)
71842+{
71843+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
71844+ buf->error = -EINVAL;
71845+ return 0;
71846+}
71847+
71848 #define NOD(NAME, MODE, IOP, FOP, OP) { \
71849 .name = (NAME), \
71850 .len = sizeof(NAME) - 1, \
71851@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
71852 if (task == current)
71853 return 0;
71854
71855+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
71856+ return -EPERM;
71857+
71858 /*
71859 * If current is actively ptrace'ing, and would also be
71860 * permitted to freshly attach with ptrace now, permit it.
71861@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
71862 if (!mm->arg_end)
71863 goto out_mm; /* Shh! No looking before we're done */
71864
71865+ if (gr_acl_handle_procpidmem(task))
71866+ goto out_mm;
71867+
71868 len = mm->arg_end - mm->arg_start;
71869
71870 if (len > PAGE_SIZE)
71871@@ -287,12 +309,28 @@ out:
71872 return res;
71873 }
71874
71875+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71876+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71877+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71878+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71879+#endif
71880+
71881 static int proc_pid_auxv(struct task_struct *task, char *buffer)
71882 {
71883 int res = 0;
71884 struct mm_struct *mm = get_task_mm(task);
71885 if (mm) {
71886 unsigned int nwords = 0;
71887+
71888+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71889+ /* allow if we're currently ptracing this task */
71890+ if (PAX_RAND_FLAGS(mm) &&
71891+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
71892+ mmput(mm);
71893+ return 0;
71894+ }
71895+#endif
71896+
71897 do {
71898 nwords += 2;
71899 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
71900@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
71901 }
71902
71903
71904-#ifdef CONFIG_KALLSYMS
71905+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71906 /*
71907 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
71908 * Returns the resolved symbol. If that fails, simply return the address.
71909@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
71910 mutex_unlock(&task->cred_guard_mutex);
71911 }
71912
71913-#ifdef CONFIG_STACKTRACE
71914+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71915
71916 #define MAX_STACK_TRACE_DEPTH 64
71917
71918@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
71919 return count;
71920 }
71921
71922-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71923+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71924 static int proc_pid_syscall(struct task_struct *task, char *buffer)
71925 {
71926 long nr;
71927@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
71928 /************************************************************************/
71929
71930 /* permission checks */
71931-static int proc_fd_access_allowed(struct inode *inode)
71932+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
71933 {
71934 struct task_struct *task;
71935 int allowed = 0;
71936@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
71937 */
71938 task = get_proc_task(inode);
71939 if (task) {
71940- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71941+ if (log)
71942+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
71943+ else
71944+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71945 put_task_struct(task);
71946 }
71947 return allowed;
71948@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
71949 static int mem_open(struct inode* inode, struct file* file)
71950 {
71951 file->private_data = (void*)((long)current->self_exec_id);
71952+
71953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71954+ file->f_version = current->exec_id;
71955+#endif
71956+
71957 return 0;
71958 }
71959
71960+static int task_dumpable(struct task_struct *task);
71961+
71962 static ssize_t mem_read(struct file * file, char __user * buf,
71963 size_t count, loff_t *ppos)
71964 {
71965@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
71966 int ret = -ESRCH;
71967 struct mm_struct *mm;
71968
71969+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71970+ if (file->f_version != current->exec_id) {
71971+ gr_log_badprocpid("mem");
71972+ return 0;
71973+ }
71974+#endif
71975+
71976 if (!task)
71977 goto out_no_task;
71978
71979@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
71980 if (!task)
71981 goto out_no_task;
71982
71983+ if (gr_acl_handle_procpidmem(task))
71984+ goto out;
71985+
71986 if (!ptrace_may_access(task, PTRACE_MODE_READ))
71987 goto out;
71988
71989@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
71990 path_put(&nd->path);
71991
71992 /* Are we allowed to snoop on the tasks file descriptors? */
71993- if (!proc_fd_access_allowed(inode))
71994+ if (!proc_fd_access_allowed(inode,0))
71995 goto out;
71996
71997 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
71998@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
71999 struct path path;
72000
72001 /* Are we allowed to snoop on the tasks file descriptors? */
72002- if (!proc_fd_access_allowed(inode))
72003- goto out;
72004+ /* logging this is needed for learning on chromium to work properly,
72005+ but we don't want to flood the logs from 'ps' which does a readlink
72006+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
72007+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
72008+ */
72009+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
72010+ if (!proc_fd_access_allowed(inode,0))
72011+ goto out;
72012+ } else {
72013+ if (!proc_fd_access_allowed(inode,1))
72014+ goto out;
72015+ }
72016
72017 error = PROC_I(inode)->op.proc_get_link(inode, &path);
72018 if (error)
72019@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
72020 rcu_read_lock();
72021 cred = __task_cred(task);
72022 inode->i_uid = cred->euid;
72023+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72024+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72025+#else
72026 inode->i_gid = cred->egid;
72027+#endif
72028 rcu_read_unlock();
72029 }
72030 security_task_to_inode(task, inode);
72031@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
72032 struct inode *inode = dentry->d_inode;
72033 struct task_struct *task;
72034 const struct cred *cred;
72035+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72036+ const struct cred *tmpcred = current_cred();
72037+#endif
72038
72039 generic_fillattr(inode, stat);
72040
72041@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
72042 stat->uid = 0;
72043 stat->gid = 0;
72044 task = pid_task(proc_pid(inode), PIDTYPE_PID);
72045+
72046+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
72047+ rcu_read_unlock();
72048+ return -ENOENT;
72049+ }
72050+
72051 if (task) {
72052+ cred = __task_cred(task);
72053+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72054+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
72055+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72056+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
72057+#endif
72058+ ) {
72059+#endif
72060 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
72061+#ifdef CONFIG_GRKERNSEC_PROC_USER
72062+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
72063+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72064+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
72065+#endif
72066 task_dumpable(task)) {
72067- cred = __task_cred(task);
72068 stat->uid = cred->euid;
72069+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72070+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
72071+#else
72072 stat->gid = cred->egid;
72073+#endif
72074 }
72075+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72076+ } else {
72077+ rcu_read_unlock();
72078+ return -ENOENT;
72079+ }
72080+#endif
72081 }
72082 rcu_read_unlock();
72083 return 0;
72084@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
72085
72086 if (task) {
72087 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
72088+#ifdef CONFIG_GRKERNSEC_PROC_USER
72089+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
72090+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72091+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
72092+#endif
72093 task_dumpable(task)) {
72094 rcu_read_lock();
72095 cred = __task_cred(task);
72096 inode->i_uid = cred->euid;
72097+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72098+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72099+#else
72100 inode->i_gid = cred->egid;
72101+#endif
72102 rcu_read_unlock();
72103 } else {
72104 inode->i_uid = 0;
72105@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
72106 int fd = proc_fd(inode);
72107
72108 if (task) {
72109- files = get_files_struct(task);
72110+ if (!gr_acl_handle_procpidmem(task))
72111+ files = get_files_struct(task);
72112 put_task_struct(task);
72113 }
72114 if (files) {
72115@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
72116 static int proc_fd_permission(struct inode *inode, int mask)
72117 {
72118 int rv;
72119+ struct task_struct *task;
72120
72121 rv = generic_permission(inode, mask, NULL);
72122- if (rv == 0)
72123- return 0;
72124+
72125 if (task_pid(current) == proc_pid(inode))
72126 rv = 0;
72127+
72128+ task = get_proc_task(inode);
72129+ if (task == NULL)
72130+ return rv;
72131+
72132+ if (gr_acl_handle_procpidmem(task))
72133+ rv = -EACCES;
72134+
72135+ put_task_struct(task);
72136+
72137 return rv;
72138 }
72139
72140@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
72141 if (!task)
72142 goto out_no_task;
72143
72144+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72145+ goto out;
72146+
72147 /*
72148 * Yes, it does not scale. And it should not. Don't add
72149 * new entries into /proc/<tgid>/ without very good reasons.
72150@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
72151 if (!task)
72152 goto out_no_task;
72153
72154+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72155+ goto out;
72156+
72157 ret = 0;
72158 i = filp->f_pos;
72159 switch (i) {
72160@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
72161 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
72162 void *cookie)
72163 {
72164- char *s = nd_get_link(nd);
72165+ const char *s = nd_get_link(nd);
72166 if (!IS_ERR(s))
72167 __putname(s);
72168 }
72169@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
72170 #ifdef CONFIG_SCHED_DEBUG
72171 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72172 #endif
72173-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72174+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72175 INF("syscall", S_IRUGO, proc_pid_syscall),
72176 #endif
72177 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72178@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
72179 #ifdef CONFIG_SECURITY
72180 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72181 #endif
72182-#ifdef CONFIG_KALLSYMS
72183+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72184 INF("wchan", S_IRUGO, proc_pid_wchan),
72185 #endif
72186-#ifdef CONFIG_STACKTRACE
72187+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72188 ONE("stack", S_IRUGO, proc_pid_stack),
72189 #endif
72190 #ifdef CONFIG_SCHEDSTATS
72191@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
72192 #ifdef CONFIG_TASK_IO_ACCOUNTING
72193 INF("io", S_IRUSR, proc_tgid_io_accounting),
72194 #endif
72195+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72196+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
72197+#endif
72198 };
72199
72200 static int proc_tgid_base_readdir(struct file * filp,
72201@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
72202 if (!inode)
72203 goto out;
72204
72205+#ifdef CONFIG_GRKERNSEC_PROC_USER
72206+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
72207+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72208+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72209+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
72210+#else
72211 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
72212+#endif
72213 inode->i_op = &proc_tgid_base_inode_operations;
72214 inode->i_fop = &proc_tgid_base_operations;
72215 inode->i_flags|=S_IMMUTABLE;
72216@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
72217 if (!task)
72218 goto out;
72219
72220+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72221+ goto out_put_task;
72222+
72223 result = proc_pid_instantiate(dir, dentry, task, NULL);
72224+out_put_task:
72225 put_task_struct(task);
72226 out:
72227 return result;
72228@@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72229 {
72230 unsigned int nr;
72231 struct task_struct *reaper;
72232+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72233+ const struct cred *tmpcred = current_cred();
72234+ const struct cred *itercred;
72235+#endif
72236+ filldir_t __filldir = filldir;
72237 struct tgid_iter iter;
72238 struct pid_namespace *ns;
72239
72240@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72241 for (iter = next_tgid(ns, iter);
72242 iter.task;
72243 iter.tgid += 1, iter = next_tgid(ns, iter)) {
72244+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72245+ rcu_read_lock();
72246+ itercred = __task_cred(iter.task);
72247+#endif
72248+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
72249+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72250+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
72251+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72252+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
72253+#endif
72254+ )
72255+#endif
72256+ )
72257+ __filldir = &gr_fake_filldir;
72258+ else
72259+ __filldir = filldir;
72260+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72261+ rcu_read_unlock();
72262+#endif
72263 filp->f_pos = iter.tgid + TGID_OFFSET;
72264- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
72265+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
72266 put_task_struct(iter.task);
72267 goto out;
72268 }
72269@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
72270 #ifdef CONFIG_SCHED_DEBUG
72271 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72272 #endif
72273-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72274+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72275 INF("syscall", S_IRUGO, proc_pid_syscall),
72276 #endif
72277 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72278@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
72279 #ifdef CONFIG_SECURITY
72280 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72281 #endif
72282-#ifdef CONFIG_KALLSYMS
72283+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72284 INF("wchan", S_IRUGO, proc_pid_wchan),
72285 #endif
72286-#ifdef CONFIG_STACKTRACE
72287+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72288 ONE("stack", S_IRUGO, proc_pid_stack),
72289 #endif
72290 #ifdef CONFIG_SCHEDSTATS
72291diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
72292index 82676e3..5f8518a 100644
72293--- a/fs/proc/cmdline.c
72294+++ b/fs/proc/cmdline.c
72295@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
72296
72297 static int __init proc_cmdline_init(void)
72298 {
72299+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72300+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
72301+#else
72302 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
72303+#endif
72304 return 0;
72305 }
72306 module_init(proc_cmdline_init);
72307diff --git a/fs/proc/devices.c b/fs/proc/devices.c
72308index 59ee7da..469b4b6 100644
72309--- a/fs/proc/devices.c
72310+++ b/fs/proc/devices.c
72311@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
72312
72313 static int __init proc_devices_init(void)
72314 {
72315+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72316+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
72317+#else
72318 proc_create("devices", 0, NULL, &proc_devinfo_operations);
72319+#endif
72320 return 0;
72321 }
72322 module_init(proc_devices_init);
72323diff --git a/fs/proc/inode.c b/fs/proc/inode.c
72324index d78ade3..81767f9 100644
72325--- a/fs/proc/inode.c
72326+++ b/fs/proc/inode.c
72327@@ -18,12 +18,19 @@
72328 #include <linux/module.h>
72329 #include <linux/smp_lock.h>
72330 #include <linux/sysctl.h>
72331+#include <linux/grsecurity.h>
72332
72333 #include <asm/system.h>
72334 #include <asm/uaccess.h>
72335
72336 #include "internal.h"
72337
72338+#ifdef CONFIG_PROC_SYSCTL
72339+extern const struct inode_operations proc_sys_inode_operations;
72340+extern const struct inode_operations proc_sys_dir_operations;
72341+#endif
72342+
72343+
72344 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
72345 {
72346 atomic_inc(&de->count);
72347@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
72348 de_put(de);
72349 if (PROC_I(inode)->sysctl)
72350 sysctl_head_put(PROC_I(inode)->sysctl);
72351+
72352+#ifdef CONFIG_PROC_SYSCTL
72353+ if (inode->i_op == &proc_sys_inode_operations ||
72354+ inode->i_op == &proc_sys_dir_operations)
72355+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
72356+#endif
72357+
72358 clear_inode(inode);
72359 }
72360
72361@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
72362 if (de->mode) {
72363 inode->i_mode = de->mode;
72364 inode->i_uid = de->uid;
72365+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72366+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72367+#else
72368 inode->i_gid = de->gid;
72369+#endif
72370 }
72371 if (de->size)
72372 inode->i_size = de->size;
72373diff --git a/fs/proc/internal.h b/fs/proc/internal.h
72374index 753ca37..26bcf3b 100644
72375--- a/fs/proc/internal.h
72376+++ b/fs/proc/internal.h
72377@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
72378 struct pid *pid, struct task_struct *task);
72379 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
72380 struct pid *pid, struct task_struct *task);
72381+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72382+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
72383+#endif
72384 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
72385
72386 extern const struct file_operations proc_maps_operations;
72387diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
72388index b442dac..aab29cb 100644
72389--- a/fs/proc/kcore.c
72390+++ b/fs/proc/kcore.c
72391@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
72392 off_t offset = 0;
72393 struct kcore_list *m;
72394
72395+ pax_track_stack();
72396+
72397 /* setup ELF header */
72398 elf = (struct elfhdr *) bufp;
72399 bufp += sizeof(struct elfhdr);
72400@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72401 * the addresses in the elf_phdr on our list.
72402 */
72403 start = kc_offset_to_vaddr(*fpos - elf_buflen);
72404- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
72405+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
72406+ if (tsz > buflen)
72407 tsz = buflen;
72408-
72409+
72410 while (buflen) {
72411 struct kcore_list *m;
72412
72413@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72414 kfree(elf_buf);
72415 } else {
72416 if (kern_addr_valid(start)) {
72417- unsigned long n;
72418+ char *elf_buf;
72419+ mm_segment_t oldfs;
72420
72421- n = copy_to_user(buffer, (char *)start, tsz);
72422- /*
72423- * We cannot distingush between fault on source
72424- * and fault on destination. When this happens
72425- * we clear too and hope it will trigger the
72426- * EFAULT again.
72427- */
72428- if (n) {
72429- if (clear_user(buffer + tsz - n,
72430- n))
72431+ elf_buf = kmalloc(tsz, GFP_KERNEL);
72432+ if (!elf_buf)
72433+ return -ENOMEM;
72434+ oldfs = get_fs();
72435+ set_fs(KERNEL_DS);
72436+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
72437+ set_fs(oldfs);
72438+ if (copy_to_user(buffer, elf_buf, tsz)) {
72439+ kfree(elf_buf);
72440 return -EFAULT;
72441+ }
72442 }
72443+ set_fs(oldfs);
72444+ kfree(elf_buf);
72445 } else {
72446 if (clear_user(buffer, tsz))
72447 return -EFAULT;
72448@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72449
72450 static int open_kcore(struct inode *inode, struct file *filp)
72451 {
72452+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72453+ return -EPERM;
72454+#endif
72455 if (!capable(CAP_SYS_RAWIO))
72456 return -EPERM;
72457 if (kcore_need_update)
72458diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
72459index 7ca7834..cfe90a4 100644
72460--- a/fs/proc/kmsg.c
72461+++ b/fs/proc/kmsg.c
72462@@ -12,37 +12,37 @@
72463 #include <linux/poll.h>
72464 #include <linux/proc_fs.h>
72465 #include <linux/fs.h>
72466+#include <linux/syslog.h>
72467
72468 #include <asm/uaccess.h>
72469 #include <asm/io.h>
72470
72471 extern wait_queue_head_t log_wait;
72472
72473-extern int do_syslog(int type, char __user *bug, int count);
72474-
72475 static int kmsg_open(struct inode * inode, struct file * file)
72476 {
72477- return do_syslog(1,NULL,0);
72478+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
72479 }
72480
72481 static int kmsg_release(struct inode * inode, struct file * file)
72482 {
72483- (void) do_syslog(0,NULL,0);
72484+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
72485 return 0;
72486 }
72487
72488 static ssize_t kmsg_read(struct file *file, char __user *buf,
72489 size_t count, loff_t *ppos)
72490 {
72491- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
72492+ if ((file->f_flags & O_NONBLOCK) &&
72493+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72494 return -EAGAIN;
72495- return do_syslog(2, buf, count);
72496+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
72497 }
72498
72499 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
72500 {
72501 poll_wait(file, &log_wait, wait);
72502- if (do_syslog(9, NULL, 0))
72503+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72504 return POLLIN | POLLRDNORM;
72505 return 0;
72506 }
72507diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
72508index a65239c..ad1182a 100644
72509--- a/fs/proc/meminfo.c
72510+++ b/fs/proc/meminfo.c
72511@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72512 unsigned long pages[NR_LRU_LISTS];
72513 int lru;
72514
72515+ pax_track_stack();
72516+
72517 /*
72518 * display in kilobytes.
72519 */
72520@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72521 vmi.used >> 10,
72522 vmi.largest_chunk >> 10
72523 #ifdef CONFIG_MEMORY_FAILURE
72524- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
72525+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
72526 #endif
72527 );
72528
72529diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
72530index 9fe7d7e..cdb62c9 100644
72531--- a/fs/proc/nommu.c
72532+++ b/fs/proc/nommu.c
72533@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
72534 if (len < 1)
72535 len = 1;
72536 seq_printf(m, "%*c", len, ' ');
72537- seq_path(m, &file->f_path, "");
72538+ seq_path(m, &file->f_path, "\n\\");
72539 }
72540
72541 seq_putc(m, '\n');
72542diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
72543index 04d1270..25e1173 100644
72544--- a/fs/proc/proc_net.c
72545+++ b/fs/proc/proc_net.c
72546@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
72547 struct task_struct *task;
72548 struct nsproxy *ns;
72549 struct net *net = NULL;
72550+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72551+ const struct cred *cred = current_cred();
72552+#endif
72553+
72554+#ifdef CONFIG_GRKERNSEC_PROC_USER
72555+ if (cred->fsuid)
72556+ return net;
72557+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72558+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
72559+ return net;
72560+#endif
72561
72562 rcu_read_lock();
72563 task = pid_task(proc_pid(dir), PIDTYPE_PID);
72564diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
72565index f667e8a..55f4d96 100644
72566--- a/fs/proc/proc_sysctl.c
72567+++ b/fs/proc/proc_sysctl.c
72568@@ -7,11 +7,13 @@
72569 #include <linux/security.h>
72570 #include "internal.h"
72571
72572+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
72573+
72574 static const struct dentry_operations proc_sys_dentry_operations;
72575 static const struct file_operations proc_sys_file_operations;
72576-static const struct inode_operations proc_sys_inode_operations;
72577+const struct inode_operations proc_sys_inode_operations;
72578 static const struct file_operations proc_sys_dir_file_operations;
72579-static const struct inode_operations proc_sys_dir_operations;
72580+const struct inode_operations proc_sys_dir_operations;
72581
72582 static struct inode *proc_sys_make_inode(struct super_block *sb,
72583 struct ctl_table_header *head, struct ctl_table *table)
72584@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72585 if (!p)
72586 goto out;
72587
72588+ if (gr_handle_sysctl(p, MAY_EXEC))
72589+ goto out;
72590+
72591 err = ERR_PTR(-ENOMEM);
72592 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
72593 if (h)
72594@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72595
72596 err = NULL;
72597 dentry->d_op = &proc_sys_dentry_operations;
72598+
72599+ gr_handle_proc_create(dentry, inode);
72600+
72601 d_add(dentry, inode);
72602
72603 out:
72604@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
72605 return -ENOMEM;
72606 } else {
72607 child->d_op = &proc_sys_dentry_operations;
72608+
72609+ gr_handle_proc_create(child, inode);
72610+
72611 d_add(child, inode);
72612 }
72613 } else {
72614@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
72615 if (*pos < file->f_pos)
72616 continue;
72617
72618+ if (gr_handle_sysctl(table, 0))
72619+ continue;
72620+
72621 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
72622 if (res)
72623 return res;
72624@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
72625 if (IS_ERR(head))
72626 return PTR_ERR(head);
72627
72628+ if (table && gr_handle_sysctl(table, MAY_EXEC))
72629+ return -ENOENT;
72630+
72631 generic_fillattr(inode, stat);
72632 if (table)
72633 stat->mode = (stat->mode & S_IFMT) | table->mode;
72634@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
72635 };
72636
72637 static const struct file_operations proc_sys_dir_file_operations = {
72638+ .read = generic_read_dir,
72639 .readdir = proc_sys_readdir,
72640 .llseek = generic_file_llseek,
72641 };
72642
72643-static const struct inode_operations proc_sys_inode_operations = {
72644+const struct inode_operations proc_sys_inode_operations = {
72645 .permission = proc_sys_permission,
72646 .setattr = proc_sys_setattr,
72647 .getattr = proc_sys_getattr,
72648 };
72649
72650-static const struct inode_operations proc_sys_dir_operations = {
72651+const struct inode_operations proc_sys_dir_operations = {
72652 .lookup = proc_sys_lookup,
72653 .permission = proc_sys_permission,
72654 .setattr = proc_sys_setattr,
72655diff --git a/fs/proc/root.c b/fs/proc/root.c
72656index b080b79..d957e63 100644
72657--- a/fs/proc/root.c
72658+++ b/fs/proc/root.c
72659@@ -134,7 +134,15 @@ void __init proc_root_init(void)
72660 #ifdef CONFIG_PROC_DEVICETREE
72661 proc_device_tree_init();
72662 #endif
72663+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72664+#ifdef CONFIG_GRKERNSEC_PROC_USER
72665+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
72666+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72667+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
72668+#endif
72669+#else
72670 proc_mkdir("bus", NULL);
72671+#endif
72672 proc_sys_init();
72673 }
72674
72675diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
72676index 3b7b82a..4b420b0 100644
72677--- a/fs/proc/task_mmu.c
72678+++ b/fs/proc/task_mmu.c
72679@@ -8,6 +8,7 @@
72680 #include <linux/mempolicy.h>
72681 #include <linux/swap.h>
72682 #include <linux/swapops.h>
72683+#include <linux/grsecurity.h>
72684
72685 #include <asm/elf.h>
72686 #include <asm/uaccess.h>
72687@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72688 "VmStk:\t%8lu kB\n"
72689 "VmExe:\t%8lu kB\n"
72690 "VmLib:\t%8lu kB\n"
72691- "VmPTE:\t%8lu kB\n",
72692- hiwater_vm << (PAGE_SHIFT-10),
72693+ "VmPTE:\t%8lu kB\n"
72694+
72695+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72696+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
72697+#endif
72698+
72699+ ,hiwater_vm << (PAGE_SHIFT-10),
72700 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
72701 mm->locked_vm << (PAGE_SHIFT-10),
72702 hiwater_rss << (PAGE_SHIFT-10),
72703 total_rss << (PAGE_SHIFT-10),
72704 data << (PAGE_SHIFT-10),
72705 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
72706- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
72707+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
72708+
72709+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72710+ , mm->context.user_cs_base, mm->context.user_cs_limit
72711+#endif
72712+
72713+ );
72714 }
72715
72716 unsigned long task_vsize(struct mm_struct *mm)
72717@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
72718 struct proc_maps_private *priv = m->private;
72719 struct vm_area_struct *vma = v;
72720
72721- vma_stop(priv, vma);
72722+ if (!IS_ERR(vma))
72723+ vma_stop(priv, vma);
72724 if (priv->task)
72725 put_task_struct(priv->task);
72726 }
72727@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
72728 return ret;
72729 }
72730
72731+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72732+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
72733+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
72734+ _mm->pax_flags & MF_PAX_SEGMEXEC))
72735+#endif
72736+
72737 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72738 {
72739 struct mm_struct *mm = vma->vm_mm;
72740@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72741 int flags = vma->vm_flags;
72742 unsigned long ino = 0;
72743 unsigned long long pgoff = 0;
72744- unsigned long start;
72745 dev_t dev = 0;
72746 int len;
72747
72748@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72749 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
72750 }
72751
72752- /* We don't show the stack guard page in /proc/maps */
72753- start = vma->vm_start;
72754- if (vma->vm_flags & VM_GROWSDOWN)
72755- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
72756- start += PAGE_SIZE;
72757-
72758 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
72759- start,
72760+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72761+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
72762+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
72763+#else
72764+ vma->vm_start,
72765 vma->vm_end,
72766+#endif
72767 flags & VM_READ ? 'r' : '-',
72768 flags & VM_WRITE ? 'w' : '-',
72769 flags & VM_EXEC ? 'x' : '-',
72770 flags & VM_MAYSHARE ? 's' : 'p',
72771+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72772+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
72773+#else
72774 pgoff,
72775+#endif
72776 MAJOR(dev), MINOR(dev), ino, &len);
72777
72778 /*
72779@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72780 */
72781 if (file) {
72782 pad_len_spaces(m, len);
72783- seq_path(m, &file->f_path, "\n");
72784+ seq_path(m, &file->f_path, "\n\\");
72785 } else {
72786 const char *name = arch_vma_name(vma);
72787 if (!name) {
72788@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72789 if (vma->vm_start <= mm->brk &&
72790 vma->vm_end >= mm->start_brk) {
72791 name = "[heap]";
72792- } else if (vma->vm_start <= mm->start_stack &&
72793- vma->vm_end >= mm->start_stack) {
72794+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
72795+ (vma->vm_start <= mm->start_stack &&
72796+ vma->vm_end >= mm->start_stack)) {
72797 name = "[stack]";
72798 }
72799 } else {
72800@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
72801 struct proc_maps_private *priv = m->private;
72802 struct task_struct *task = priv->task;
72803
72804+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72805+ if (current->exec_id != m->exec_id) {
72806+ gr_log_badprocpid("maps");
72807+ return 0;
72808+ }
72809+#endif
72810+
72811 show_map_vma(m, vma);
72812
72813 if (m->count < m->size) /* vma is copied successfully */
72814@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
72815 .private = &mss,
72816 };
72817
72818+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72819+ if (current->exec_id != m->exec_id) {
72820+ gr_log_badprocpid("smaps");
72821+ return 0;
72822+ }
72823+#endif
72824 memset(&mss, 0, sizeof mss);
72825- mss.vma = vma;
72826- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72827- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72828+
72829+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72830+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
72831+#endif
72832+ mss.vma = vma;
72833+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72834+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72835+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72836+ }
72837+#endif
72838
72839 show_map_vma(m, vma);
72840
72841@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
72842 "Swap: %8lu kB\n"
72843 "KernelPageSize: %8lu kB\n"
72844 "MMUPageSize: %8lu kB\n",
72845+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72846+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
72847+#else
72848 (vma->vm_end - vma->vm_start) >> 10,
72849+#endif
72850 mss.resident >> 10,
72851 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
72852 mss.shared_clean >> 10,
72853diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
72854index 8f5c05d..c99c76d 100644
72855--- a/fs/proc/task_nommu.c
72856+++ b/fs/proc/task_nommu.c
72857@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72858 else
72859 bytes += kobjsize(mm);
72860
72861- if (current->fs && current->fs->users > 1)
72862+ if (current->fs && atomic_read(&current->fs->users) > 1)
72863 sbytes += kobjsize(current->fs);
72864 else
72865 bytes += kobjsize(current->fs);
72866@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
72867 if (len < 1)
72868 len = 1;
72869 seq_printf(m, "%*c", len, ' ');
72870- seq_path(m, &file->f_path, "");
72871+ seq_path(m, &file->f_path, "\n\\");
72872 }
72873
72874 seq_putc(m, '\n');
72875diff --git a/fs/readdir.c b/fs/readdir.c
72876index 7723401..30059a6 100644
72877--- a/fs/readdir.c
72878+++ b/fs/readdir.c
72879@@ -16,6 +16,7 @@
72880 #include <linux/security.h>
72881 #include <linux/syscalls.h>
72882 #include <linux/unistd.h>
72883+#include <linux/namei.h>
72884
72885 #include <asm/uaccess.h>
72886
72887@@ -67,6 +68,7 @@ struct old_linux_dirent {
72888
72889 struct readdir_callback {
72890 struct old_linux_dirent __user * dirent;
72891+ struct file * file;
72892 int result;
72893 };
72894
72895@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
72896 buf->result = -EOVERFLOW;
72897 return -EOVERFLOW;
72898 }
72899+
72900+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72901+ return 0;
72902+
72903 buf->result++;
72904 dirent = buf->dirent;
72905 if (!access_ok(VERIFY_WRITE, dirent,
72906@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
72907
72908 buf.result = 0;
72909 buf.dirent = dirent;
72910+ buf.file = file;
72911
72912 error = vfs_readdir(file, fillonedir, &buf);
72913 if (buf.result)
72914@@ -142,6 +149,7 @@ struct linux_dirent {
72915 struct getdents_callback {
72916 struct linux_dirent __user * current_dir;
72917 struct linux_dirent __user * previous;
72918+ struct file * file;
72919 int count;
72920 int error;
72921 };
72922@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
72923 buf->error = -EOVERFLOW;
72924 return -EOVERFLOW;
72925 }
72926+
72927+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72928+ return 0;
72929+
72930 dirent = buf->previous;
72931 if (dirent) {
72932 if (__put_user(offset, &dirent->d_off))
72933@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
72934 buf.previous = NULL;
72935 buf.count = count;
72936 buf.error = 0;
72937+ buf.file = file;
72938
72939 error = vfs_readdir(file, filldir, &buf);
72940 if (error >= 0)
72941@@ -228,6 +241,7 @@ out:
72942 struct getdents_callback64 {
72943 struct linux_dirent64 __user * current_dir;
72944 struct linux_dirent64 __user * previous;
72945+ struct file *file;
72946 int count;
72947 int error;
72948 };
72949@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
72950 buf->error = -EINVAL; /* only used if we fail.. */
72951 if (reclen > buf->count)
72952 return -EINVAL;
72953+
72954+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72955+ return 0;
72956+
72957 dirent = buf->previous;
72958 if (dirent) {
72959 if (__put_user(offset, &dirent->d_off))
72960@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72961
72962 buf.current_dir = dirent;
72963 buf.previous = NULL;
72964+ buf.file = file;
72965 buf.count = count;
72966 buf.error = 0;
72967
72968@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72969 error = buf.error;
72970 lastdirent = buf.previous;
72971 if (lastdirent) {
72972- typeof(lastdirent->d_off) d_off = file->f_pos;
72973+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
72974 if (__put_user(d_off, &lastdirent->d_off))
72975 error = -EFAULT;
72976 else
72977diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
72978index d42c30c..4fd8718 100644
72979--- a/fs/reiserfs/dir.c
72980+++ b/fs/reiserfs/dir.c
72981@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
72982 struct reiserfs_dir_entry de;
72983 int ret = 0;
72984
72985+ pax_track_stack();
72986+
72987 reiserfs_write_lock(inode->i_sb);
72988
72989 reiserfs_check_lock_depth(inode->i_sb, "readdir");
72990diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
72991index 128d3f7..8840d44 100644
72992--- a/fs/reiserfs/do_balan.c
72993+++ b/fs/reiserfs/do_balan.c
72994@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
72995 return;
72996 }
72997
72998- atomic_inc(&(fs_generation(tb->tb_sb)));
72999+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
73000 do_balance_starts(tb);
73001
73002 /* balance leaf returns 0 except if combining L R and S into
73003diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
73004index 72cb1cc..d0e3181 100644
73005--- a/fs/reiserfs/item_ops.c
73006+++ b/fs/reiserfs/item_ops.c
73007@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
73008 vi->vi_index, vi->vi_type, vi->vi_ih);
73009 }
73010
73011-static struct item_operations stat_data_ops = {
73012+static const struct item_operations stat_data_ops = {
73013 .bytes_number = sd_bytes_number,
73014 .decrement_key = sd_decrement_key,
73015 .is_left_mergeable = sd_is_left_mergeable,
73016@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
73017 vi->vi_index, vi->vi_type, vi->vi_ih);
73018 }
73019
73020-static struct item_operations direct_ops = {
73021+static const struct item_operations direct_ops = {
73022 .bytes_number = direct_bytes_number,
73023 .decrement_key = direct_decrement_key,
73024 .is_left_mergeable = direct_is_left_mergeable,
73025@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
73026 vi->vi_index, vi->vi_type, vi->vi_ih);
73027 }
73028
73029-static struct item_operations indirect_ops = {
73030+static const struct item_operations indirect_ops = {
73031 .bytes_number = indirect_bytes_number,
73032 .decrement_key = indirect_decrement_key,
73033 .is_left_mergeable = indirect_is_left_mergeable,
73034@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
73035 printk("\n");
73036 }
73037
73038-static struct item_operations direntry_ops = {
73039+static const struct item_operations direntry_ops = {
73040 .bytes_number = direntry_bytes_number,
73041 .decrement_key = direntry_decrement_key,
73042 .is_left_mergeable = direntry_is_left_mergeable,
73043@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
73044 "Invalid item type observed, run fsck ASAP");
73045 }
73046
73047-static struct item_operations errcatch_ops = {
73048+static const struct item_operations errcatch_ops = {
73049 errcatch_bytes_number,
73050 errcatch_decrement_key,
73051 errcatch_is_left_mergeable,
73052@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
73053 #error Item types must use disk-format assigned values.
73054 #endif
73055
73056-struct item_operations *item_ops[TYPE_ANY + 1] = {
73057+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
73058 &stat_data_ops,
73059 &indirect_ops,
73060 &direct_ops,
73061diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
73062index b5fe0aa..e0e25c4 100644
73063--- a/fs/reiserfs/journal.c
73064+++ b/fs/reiserfs/journal.c
73065@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
73066 struct buffer_head *bh;
73067 int i, j;
73068
73069+ pax_track_stack();
73070+
73071 bh = __getblk(dev, block, bufsize);
73072 if (buffer_uptodate(bh))
73073 return (bh);
73074diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
73075index 2715791..b8996db 100644
73076--- a/fs/reiserfs/namei.c
73077+++ b/fs/reiserfs/namei.c
73078@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
73079 unsigned long savelink = 1;
73080 struct timespec ctime;
73081
73082+ pax_track_stack();
73083+
73084 /* three balancings: (1) old name removal, (2) new name insertion
73085 and (3) maybe "save" link insertion
73086 stat data updates: (1) old directory,
73087diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
73088index 9229e55..3d2e3b7 100644
73089--- a/fs/reiserfs/procfs.c
73090+++ b/fs/reiserfs/procfs.c
73091@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
73092 "SMALL_TAILS " : "NO_TAILS ",
73093 replay_only(sb) ? "REPLAY_ONLY " : "",
73094 convert_reiserfs(sb) ? "CONV " : "",
73095- atomic_read(&r->s_generation_counter),
73096+ atomic_read_unchecked(&r->s_generation_counter),
73097 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
73098 SF(s_do_balance), SF(s_unneeded_left_neighbor),
73099 SF(s_good_search_by_key_reada), SF(s_bmaps),
73100@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
73101 struct journal_params *jp = &rs->s_v1.s_journal;
73102 char b[BDEVNAME_SIZE];
73103
73104+ pax_track_stack();
73105+
73106 seq_printf(m, /* on-disk fields */
73107 "jp_journal_1st_block: \t%i\n"
73108 "jp_journal_dev: \t%s[%x]\n"
73109diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
73110index d036ee5..4c7dca1 100644
73111--- a/fs/reiserfs/stree.c
73112+++ b/fs/reiserfs/stree.c
73113@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
73114 int iter = 0;
73115 #endif
73116
73117+ pax_track_stack();
73118+
73119 BUG_ON(!th->t_trans_id);
73120
73121 init_tb_struct(th, &s_del_balance, sb, path,
73122@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
73123 int retval;
73124 int quota_cut_bytes = 0;
73125
73126+ pax_track_stack();
73127+
73128 BUG_ON(!th->t_trans_id);
73129
73130 le_key2cpu_key(&cpu_key, key);
73131@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
73132 int quota_cut_bytes;
73133 loff_t tail_pos = 0;
73134
73135+ pax_track_stack();
73136+
73137 BUG_ON(!th->t_trans_id);
73138
73139 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
73140@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
73141 int retval;
73142 int fs_gen;
73143
73144+ pax_track_stack();
73145+
73146 BUG_ON(!th->t_trans_id);
73147
73148 fs_gen = get_generation(inode->i_sb);
73149@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
73150 int fs_gen = 0;
73151 int quota_bytes = 0;
73152
73153+ pax_track_stack();
73154+
73155 BUG_ON(!th->t_trans_id);
73156
73157 if (inode) { /* Do we count quotas for item? */
73158diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
73159index 7cb1285..c726cd0 100644
73160--- a/fs/reiserfs/super.c
73161+++ b/fs/reiserfs/super.c
73162@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
73163 {.option_name = NULL}
73164 };
73165
73166+ pax_track_stack();
73167+
73168 *blocks = 0;
73169 if (!options || !*options)
73170 /* use default configuration: create tails, journaling on, no
73171diff --git a/fs/select.c b/fs/select.c
73172index fd38ce2..f5381b8 100644
73173--- a/fs/select.c
73174+++ b/fs/select.c
73175@@ -20,6 +20,7 @@
73176 #include <linux/module.h>
73177 #include <linux/slab.h>
73178 #include <linux/poll.h>
73179+#include <linux/security.h>
73180 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
73181 #include <linux/file.h>
73182 #include <linux/fdtable.h>
73183@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
73184 int retval, i, timed_out = 0;
73185 unsigned long slack = 0;
73186
73187+ pax_track_stack();
73188+
73189 rcu_read_lock();
73190 retval = max_select_fd(n, fds);
73191 rcu_read_unlock();
73192@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
73193 /* Allocate small arguments on the stack to save memory and be faster */
73194 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
73195
73196+ pax_track_stack();
73197+
73198 ret = -EINVAL;
73199 if (n < 0)
73200 goto out_nofds;
73201@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
73202 struct poll_list *walk = head;
73203 unsigned long todo = nfds;
73204
73205+ pax_track_stack();
73206+
73207+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
73208 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
73209 return -EINVAL;
73210
73211diff --git a/fs/seq_file.c b/fs/seq_file.c
73212index eae7d9d..b7613c6 100644
73213--- a/fs/seq_file.c
73214+++ b/fs/seq_file.c
73215@@ -9,6 +9,7 @@
73216 #include <linux/module.h>
73217 #include <linux/seq_file.h>
73218 #include <linux/slab.h>
73219+#include <linux/sched.h>
73220
73221 #include <asm/uaccess.h>
73222 #include <asm/page.h>
73223@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
73224 memset(p, 0, sizeof(*p));
73225 mutex_init(&p->lock);
73226 p->op = op;
73227+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73228+ p->exec_id = current->exec_id;
73229+#endif
73230
73231 /*
73232 * Wrappers around seq_open(e.g. swaps_open) need to be
73233@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
73234 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
73235 void *data)
73236 {
73237- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
73238+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
73239 int res = -ENOMEM;
73240
73241 if (op) {
73242diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
73243index 71c29b6..54694dd 100644
73244--- a/fs/smbfs/proc.c
73245+++ b/fs/smbfs/proc.c
73246@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
73247
73248 out:
73249 if (server->local_nls != NULL && server->remote_nls != NULL)
73250- server->ops->convert = convert_cp;
73251+ *(void **)&server->ops->convert = convert_cp;
73252 else
73253- server->ops->convert = convert_memcpy;
73254+ *(void **)&server->ops->convert = convert_memcpy;
73255
73256 smb_unlock_server(server);
73257 return n;
73258@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
73259
73260 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
73261 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
73262- server->ops->getattr = smb_proc_getattr_core;
73263+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
73264 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
73265- server->ops->getattr = smb_proc_getattr_ff;
73266+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
73267 }
73268
73269 /* Decode server capabilities */
73270@@ -3439,7 +3439,7 @@ out:
73271 static void
73272 install_ops(struct smb_ops *dst, struct smb_ops *src)
73273 {
73274- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73275+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73276 }
73277
73278 /* < LANMAN2 */
73279diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
73280index 00b2909..2ace383 100644
73281--- a/fs/smbfs/symlink.c
73282+++ b/fs/smbfs/symlink.c
73283@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
73284
73285 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
73286 {
73287- char *s = nd_get_link(nd);
73288+ const char *s = nd_get_link(nd);
73289 if (!IS_ERR(s))
73290 __putname(s);
73291 }
73292diff --git a/fs/splice.c b/fs/splice.c
73293index bb92b7c..5aa72b0 100644
73294--- a/fs/splice.c
73295+++ b/fs/splice.c
73296@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73297 pipe_lock(pipe);
73298
73299 for (;;) {
73300- if (!pipe->readers) {
73301+ if (!atomic_read(&pipe->readers)) {
73302 send_sig(SIGPIPE, current, 0);
73303 if (!ret)
73304 ret = -EPIPE;
73305@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73306 do_wakeup = 0;
73307 }
73308
73309- pipe->waiting_writers++;
73310+ atomic_inc(&pipe->waiting_writers);
73311 pipe_wait(pipe);
73312- pipe->waiting_writers--;
73313+ atomic_dec(&pipe->waiting_writers);
73314 }
73315
73316 pipe_unlock(pipe);
73317@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
73318 .spd_release = spd_release_page,
73319 };
73320
73321+ pax_track_stack();
73322+
73323 index = *ppos >> PAGE_CACHE_SHIFT;
73324 loff = *ppos & ~PAGE_CACHE_MASK;
73325 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73326@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
73327 old_fs = get_fs();
73328 set_fs(get_ds());
73329 /* The cast to a user pointer is valid due to the set_fs() */
73330- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
73331+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
73332 set_fs(old_fs);
73333
73334 return res;
73335@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
73336 old_fs = get_fs();
73337 set_fs(get_ds());
73338 /* The cast to a user pointer is valid due to the set_fs() */
73339- res = vfs_write(file, (const char __user *)buf, count, &pos);
73340+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
73341 set_fs(old_fs);
73342
73343 return res;
73344@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73345 .spd_release = spd_release_page,
73346 };
73347
73348+ pax_track_stack();
73349+
73350 index = *ppos >> PAGE_CACHE_SHIFT;
73351 offset = *ppos & ~PAGE_CACHE_MASK;
73352 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73353@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73354 goto err;
73355
73356 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
73357- vec[i].iov_base = (void __user *) page_address(page);
73358+ vec[i].iov_base = (__force void __user *) page_address(page);
73359 vec[i].iov_len = this_len;
73360 pages[i] = page;
73361 spd.nr_pages++;
73362@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
73363 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
73364 {
73365 while (!pipe->nrbufs) {
73366- if (!pipe->writers)
73367+ if (!atomic_read(&pipe->writers))
73368 return 0;
73369
73370- if (!pipe->waiting_writers && sd->num_spliced)
73371+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
73372 return 0;
73373
73374 if (sd->flags & SPLICE_F_NONBLOCK)
73375@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
73376 * out of the pipe right after the splice_to_pipe(). So set
73377 * PIPE_READERS appropriately.
73378 */
73379- pipe->readers = 1;
73380+ atomic_set(&pipe->readers, 1);
73381
73382 current->splice_pipe = pipe;
73383 }
73384@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
73385 .spd_release = spd_release_page,
73386 };
73387
73388+ pax_track_stack();
73389+
73390 pipe = pipe_info(file->f_path.dentry->d_inode);
73391 if (!pipe)
73392 return -EBADF;
73393@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73394 ret = -ERESTARTSYS;
73395 break;
73396 }
73397- if (!pipe->writers)
73398+ if (!atomic_read(&pipe->writers))
73399 break;
73400- if (!pipe->waiting_writers) {
73401+ if (!atomic_read(&pipe->waiting_writers)) {
73402 if (flags & SPLICE_F_NONBLOCK) {
73403 ret = -EAGAIN;
73404 break;
73405@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73406 pipe_lock(pipe);
73407
73408 while (pipe->nrbufs >= PIPE_BUFFERS) {
73409- if (!pipe->readers) {
73410+ if (!atomic_read(&pipe->readers)) {
73411 send_sig(SIGPIPE, current, 0);
73412 ret = -EPIPE;
73413 break;
73414@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73415 ret = -ERESTARTSYS;
73416 break;
73417 }
73418- pipe->waiting_writers++;
73419+ atomic_inc(&pipe->waiting_writers);
73420 pipe_wait(pipe);
73421- pipe->waiting_writers--;
73422+ atomic_dec(&pipe->waiting_writers);
73423 }
73424
73425 pipe_unlock(pipe);
73426@@ -1786,14 +1792,14 @@ retry:
73427 pipe_double_lock(ipipe, opipe);
73428
73429 do {
73430- if (!opipe->readers) {
73431+ if (!atomic_read(&opipe->readers)) {
73432 send_sig(SIGPIPE, current, 0);
73433 if (!ret)
73434 ret = -EPIPE;
73435 break;
73436 }
73437
73438- if (!ipipe->nrbufs && !ipipe->writers)
73439+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
73440 break;
73441
73442 /*
73443@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73444 pipe_double_lock(ipipe, opipe);
73445
73446 do {
73447- if (!opipe->readers) {
73448+ if (!atomic_read(&opipe->readers)) {
73449 send_sig(SIGPIPE, current, 0);
73450 if (!ret)
73451 ret = -EPIPE;
73452@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73453 * return EAGAIN if we have the potential of some data in the
73454 * future, otherwise just return 0
73455 */
73456- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
73457+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
73458 ret = -EAGAIN;
73459
73460 pipe_unlock(ipipe);
73461diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
73462index 60c702b..dddc2b5 100644
73463--- a/fs/sysfs/bin.c
73464+++ b/fs/sysfs/bin.c
73465@@ -67,6 +67,8 @@ fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
73466 }
73467
73468 static ssize_t
73469+read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
73470+static ssize_t
73471 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
73472 {
73473 struct bin_buffer *bb = file->private_data;
73474diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
73475index e020183..18d64b4 100644
73476--- a/fs/sysfs/dir.c
73477+++ b/fs/sysfs/dir.c
73478@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
73479 struct sysfs_dirent *sd;
73480 int rc;
73481
73482+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
73483+ const char *parent_name = parent_sd->s_name;
73484+
73485+ mode = S_IFDIR | S_IRWXU;
73486+
73487+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
73488+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
73489+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
73490+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
73491+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
73492+#endif
73493+
73494 /* allocate */
73495 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
73496 if (!sd)
73497diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
73498index 7118a38..70af853 100644
73499--- a/fs/sysfs/file.c
73500+++ b/fs/sysfs/file.c
73501@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
73502
73503 struct sysfs_open_dirent {
73504 atomic_t refcnt;
73505- atomic_t event;
73506+ atomic_unchecked_t event;
73507 wait_queue_head_t poll;
73508 struct list_head buffers; /* goes through sysfs_buffer.list */
73509 };
73510@@ -53,7 +53,7 @@ struct sysfs_buffer {
73511 size_t count;
73512 loff_t pos;
73513 char * page;
73514- struct sysfs_ops * ops;
73515+ const struct sysfs_ops * ops;
73516 struct mutex mutex;
73517 int needs_read_fill;
73518 int event;
73519@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73520 {
73521 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73522 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73523- struct sysfs_ops * ops = buffer->ops;
73524+ const struct sysfs_ops * ops = buffer->ops;
73525 int ret = 0;
73526 ssize_t count;
73527
73528@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73529 if (!sysfs_get_active_two(attr_sd))
73530 return -ENODEV;
73531
73532- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
73533+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
73534 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
73535
73536 sysfs_put_active_two(attr_sd);
73537@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
73538 {
73539 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73540 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73541- struct sysfs_ops * ops = buffer->ops;
73542+ const struct sysfs_ops * ops = buffer->ops;
73543 int rc;
73544
73545 /* need attr_sd for attr and ops, its parent for kobj */
73546@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
73547 return -ENOMEM;
73548
73549 atomic_set(&new_od->refcnt, 0);
73550- atomic_set(&new_od->event, 1);
73551+ atomic_set_unchecked(&new_od->event, 1);
73552 init_waitqueue_head(&new_od->poll);
73553 INIT_LIST_HEAD(&new_od->buffers);
73554 goto retry;
73555@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
73556 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
73557 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73558 struct sysfs_buffer *buffer;
73559- struct sysfs_ops *ops;
73560+ const struct sysfs_ops *ops;
73561 int error = -EACCES;
73562 char *p;
73563
73564@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
73565
73566 sysfs_put_active_two(attr_sd);
73567
73568- if (buffer->event != atomic_read(&od->event))
73569+ if (buffer->event != atomic_read_unchecked(&od->event))
73570 goto trigger;
73571
73572 return DEFAULT_POLLMASK;
73573@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
73574
73575 od = sd->s_attr.open;
73576 if (od) {
73577- atomic_inc(&od->event);
73578+ atomic_inc_unchecked(&od->event);
73579 wake_up_interruptible(&od->poll);
73580 }
73581
73582diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
73583index c5081ad..342ea86 100644
73584--- a/fs/sysfs/symlink.c
73585+++ b/fs/sysfs/symlink.c
73586@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
73587
73588 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
73589 {
73590- char *page = nd_get_link(nd);
73591+ const char *page = nd_get_link(nd);
73592 if (!IS_ERR(page))
73593 free_page((unsigned long)page);
73594 }
73595diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
73596index 1e06853..b06d325 100644
73597--- a/fs/udf/balloc.c
73598+++ b/fs/udf/balloc.c
73599@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
73600
73601 mutex_lock(&sbi->s_alloc_mutex);
73602 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73603- if (bloc->logicalBlockNum < 0 ||
73604- (bloc->logicalBlockNum + count) >
73605- partmap->s_partition_len) {
73606+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73607 udf_debug("%d < %d || %d + %d > %d\n",
73608 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
73609 count, partmap->s_partition_len);
73610@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
73611
73612 mutex_lock(&sbi->s_alloc_mutex);
73613 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73614- if (bloc->logicalBlockNum < 0 ||
73615- (bloc->logicalBlockNum + count) >
73616- partmap->s_partition_len) {
73617+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73618 udf_debug("%d < %d || %d + %d > %d\n",
73619 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
73620 partmap->s_partition_len);
73621diff --git a/fs/udf/inode.c b/fs/udf/inode.c
73622index 6d24c2c..fff470f 100644
73623--- a/fs/udf/inode.c
73624+++ b/fs/udf/inode.c
73625@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
73626 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
73627 int lastblock = 0;
73628
73629+ pax_track_stack();
73630+
73631 prev_epos.offset = udf_file_entry_alloc_offset(inode);
73632 prev_epos.block = iinfo->i_location;
73633 prev_epos.bh = NULL;
73634diff --git a/fs/udf/misc.c b/fs/udf/misc.c
73635index 9215700..bf1f68e 100644
73636--- a/fs/udf/misc.c
73637+++ b/fs/udf/misc.c
73638@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
73639
73640 u8 udf_tag_checksum(const struct tag *t)
73641 {
73642- u8 *data = (u8 *)t;
73643+ const u8 *data = (const u8 *)t;
73644 u8 checksum = 0;
73645 int i;
73646 for (i = 0; i < sizeof(struct tag); ++i)
73647diff --git a/fs/utimes.c b/fs/utimes.c
73648index e4c75db..b4df0e0 100644
73649--- a/fs/utimes.c
73650+++ b/fs/utimes.c
73651@@ -1,6 +1,7 @@
73652 #include <linux/compiler.h>
73653 #include <linux/file.h>
73654 #include <linux/fs.h>
73655+#include <linux/security.h>
73656 #include <linux/linkage.h>
73657 #include <linux/mount.h>
73658 #include <linux/namei.h>
73659@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
73660 goto mnt_drop_write_and_out;
73661 }
73662 }
73663+
73664+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
73665+ error = -EACCES;
73666+ goto mnt_drop_write_and_out;
73667+ }
73668+
73669 mutex_lock(&inode->i_mutex);
73670 error = notify_change(path->dentry, &newattrs);
73671 mutex_unlock(&inode->i_mutex);
73672diff --git a/fs/xattr.c b/fs/xattr.c
73673index 6d4f6d3..cda3958 100644
73674--- a/fs/xattr.c
73675+++ b/fs/xattr.c
73676@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
73677 * Extended attribute SET operations
73678 */
73679 static long
73680-setxattr(struct dentry *d, const char __user *name, const void __user *value,
73681+setxattr(struct path *path, const char __user *name, const void __user *value,
73682 size_t size, int flags)
73683 {
73684 int error;
73685@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
73686 return PTR_ERR(kvalue);
73687 }
73688
73689- error = vfs_setxattr(d, kname, kvalue, size, flags);
73690+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
73691+ error = -EACCES;
73692+ goto out;
73693+ }
73694+
73695+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
73696+out:
73697 kfree(kvalue);
73698 return error;
73699 }
73700@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
73701 return error;
73702 error = mnt_want_write(path.mnt);
73703 if (!error) {
73704- error = setxattr(path.dentry, name, value, size, flags);
73705+ error = setxattr(&path, name, value, size, flags);
73706 mnt_drop_write(path.mnt);
73707 }
73708 path_put(&path);
73709@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
73710 return error;
73711 error = mnt_want_write(path.mnt);
73712 if (!error) {
73713- error = setxattr(path.dentry, name, value, size, flags);
73714+ error = setxattr(&path, name, value, size, flags);
73715 mnt_drop_write(path.mnt);
73716 }
73717 path_put(&path);
73718@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
73719 const void __user *,value, size_t, size, int, flags)
73720 {
73721 struct file *f;
73722- struct dentry *dentry;
73723 int error = -EBADF;
73724
73725 f = fget(fd);
73726 if (!f)
73727 return error;
73728- dentry = f->f_path.dentry;
73729- audit_inode(NULL, dentry);
73730+ audit_inode(NULL, f->f_path.dentry);
73731 error = mnt_want_write_file(f);
73732 if (!error) {
73733- error = setxattr(dentry, name, value, size, flags);
73734+ error = setxattr(&f->f_path, name, value, size, flags);
73735 mnt_drop_write(f->f_path.mnt);
73736 }
73737 fput(f);
73738diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
73739index c6ad7c7..f2847a7 100644
73740--- a/fs/xattr_acl.c
73741+++ b/fs/xattr_acl.c
73742@@ -17,8 +17,8 @@
73743 struct posix_acl *
73744 posix_acl_from_xattr(const void *value, size_t size)
73745 {
73746- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73747- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73748+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73749+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73750 int count;
73751 struct posix_acl *acl;
73752 struct posix_acl_entry *acl_e;
73753diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
73754index 942362f..88f96f5 100644
73755--- a/fs/xfs/linux-2.6/xfs_ioctl.c
73756+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
73757@@ -134,7 +134,7 @@ xfs_find_handle(
73758 }
73759
73760 error = -EFAULT;
73761- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
73762+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
73763 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
73764 goto out_put;
73765
73766@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
73767 if (IS_ERR(dentry))
73768 return PTR_ERR(dentry);
73769
73770- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
73771+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
73772 if (!kbuf)
73773 goto out_dput;
73774
73775@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
73776 xfs_mount_t *mp,
73777 void __user *arg)
73778 {
73779- xfs_fsop_geom_t fsgeo;
73780+ xfs_fsop_geom_t fsgeo;
73781 int error;
73782
73783 error = xfs_fs_geometry(mp, &fsgeo, 3);
73784diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
73785index bad485a..479bd32 100644
73786--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
73787+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
73788@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
73789 xfs_fsop_geom_t fsgeo;
73790 int error;
73791
73792+ memset(&fsgeo, 0, sizeof(fsgeo));
73793 error = xfs_fs_geometry(mp, &fsgeo, 3);
73794 if (error)
73795 return -error;
73796diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
73797index 1f3b4b8..6102f6d 100644
73798--- a/fs/xfs/linux-2.6/xfs_iops.c
73799+++ b/fs/xfs/linux-2.6/xfs_iops.c
73800@@ -468,7 +468,7 @@ xfs_vn_put_link(
73801 struct nameidata *nd,
73802 void *p)
73803 {
73804- char *s = nd_get_link(nd);
73805+ const char *s = nd_get_link(nd);
73806
73807 if (!IS_ERR(s))
73808 kfree(s);
73809diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
73810index 8971fb0..5fc1eb2 100644
73811--- a/fs/xfs/xfs_bmap.c
73812+++ b/fs/xfs/xfs_bmap.c
73813@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
73814 int nmap,
73815 int ret_nmap);
73816 #else
73817-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
73818+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
73819 #endif /* DEBUG */
73820
73821 #if defined(XFS_RW_TRACE)
73822diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
73823index e89734e..5e84d8d 100644
73824--- a/fs/xfs/xfs_dir2_sf.c
73825+++ b/fs/xfs/xfs_dir2_sf.c
73826@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
73827 }
73828
73829 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
73830- if (filldir(dirent, sfep->name, sfep->namelen,
73831+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
73832+ char name[sfep->namelen];
73833+ memcpy(name, sfep->name, sfep->namelen);
73834+ if (filldir(dirent, name, sfep->namelen,
73835+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
73836+ *offset = off & 0x7fffffff;
73837+ return 0;
73838+ }
73839+ } else if (filldir(dirent, sfep->name, sfep->namelen,
73840 off & 0x7fffffff, ino, DT_UNKNOWN)) {
73841 *offset = off & 0x7fffffff;
73842 return 0;
73843diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
73844index 8f32f50..b6a41e8 100644
73845--- a/fs/xfs/xfs_vnodeops.c
73846+++ b/fs/xfs/xfs_vnodeops.c
73847@@ -564,13 +564,18 @@ xfs_readlink(
73848
73849 xfs_ilock(ip, XFS_ILOCK_SHARED);
73850
73851- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
73852- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
73853-
73854 pathlen = ip->i_d.di_size;
73855 if (!pathlen)
73856 goto out;
73857
73858+ if (pathlen > MAXPATHLEN) {
73859+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
73860+ __func__, (unsigned long long)ip->i_ino, pathlen);
73861+ ASSERT(0);
73862+ error = XFS_ERROR(EFSCORRUPTED);
73863+ goto out;
73864+ }
73865+
73866 if (ip->i_df.if_flags & XFS_IFINLINE) {
73867 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
73868 link[pathlen] = '\0';
73869diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
73870new file mode 100644
73871index 0000000..50819f8
73872--- /dev/null
73873+++ b/grsecurity/Kconfig
73874@@ -0,0 +1,1077 @@
73875+#
73876+# grecurity configuration
73877+#
73878+
73879+menu "Grsecurity"
73880+
73881+config GRKERNSEC
73882+ bool "Grsecurity"
73883+ select CRYPTO
73884+ select CRYPTO_SHA256
73885+ help
73886+ If you say Y here, you will be able to configure many features
73887+ that will enhance the security of your system. It is highly
73888+ recommended that you say Y here and read through the help
73889+ for each option so that you fully understand the features and
73890+ can evaluate their usefulness for your machine.
73891+
73892+choice
73893+ prompt "Security Level"
73894+ depends on GRKERNSEC
73895+ default GRKERNSEC_CUSTOM
73896+
73897+config GRKERNSEC_LOW
73898+ bool "Low"
73899+ select GRKERNSEC_LINK
73900+ select GRKERNSEC_FIFO
73901+ select GRKERNSEC_RANDNET
73902+ select GRKERNSEC_DMESG
73903+ select GRKERNSEC_CHROOT
73904+ select GRKERNSEC_CHROOT_CHDIR
73905+
73906+ help
73907+ If you choose this option, several of the grsecurity options will
73908+ be enabled that will give you greater protection against a number
73909+ of attacks, while assuring that none of your software will have any
73910+ conflicts with the additional security measures. If you run a lot
73911+ of unusual software, or you are having problems with the higher
73912+ security levels, you should say Y here. With this option, the
73913+ following features are enabled:
73914+
73915+ - Linking restrictions
73916+ - FIFO restrictions
73917+ - Restricted dmesg
73918+ - Enforced chdir("/") on chroot
73919+ - Runtime module disabling
73920+
73921+config GRKERNSEC_MEDIUM
73922+ bool "Medium"
73923+ select PAX
73924+ select PAX_EI_PAX
73925+ select PAX_PT_PAX_FLAGS
73926+ select PAX_HAVE_ACL_FLAGS
73927+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73928+ select GRKERNSEC_CHROOT
73929+ select GRKERNSEC_CHROOT_SYSCTL
73930+ select GRKERNSEC_LINK
73931+ select GRKERNSEC_FIFO
73932+ select GRKERNSEC_DMESG
73933+ select GRKERNSEC_RANDNET
73934+ select GRKERNSEC_FORKFAIL
73935+ select GRKERNSEC_TIME
73936+ select GRKERNSEC_SIGNAL
73937+ select GRKERNSEC_CHROOT
73938+ select GRKERNSEC_CHROOT_UNIX
73939+ select GRKERNSEC_CHROOT_MOUNT
73940+ select GRKERNSEC_CHROOT_PIVOT
73941+ select GRKERNSEC_CHROOT_DOUBLE
73942+ select GRKERNSEC_CHROOT_CHDIR
73943+ select GRKERNSEC_CHROOT_MKNOD
73944+ select GRKERNSEC_PROC
73945+ select GRKERNSEC_PROC_USERGROUP
73946+ select PAX_RANDUSTACK
73947+ select PAX_ASLR
73948+ select PAX_RANDMMAP
73949+ select PAX_REFCOUNT if (X86 || SPARC64)
73950+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73951+
73952+ help
73953+ If you say Y here, several features in addition to those included
73954+ in the low additional security level will be enabled. These
73955+ features provide even more security to your system, though in rare
73956+ cases they may be incompatible with very old or poorly written
73957+ software. If you enable this option, make sure that your auth
73958+ service (identd) is running as gid 1001. With this option,
73959+ the following features (in addition to those provided in the
73960+ low additional security level) will be enabled:
73961+
73962+ - Failed fork logging
73963+ - Time change logging
73964+ - Signal logging
73965+ - Deny mounts in chroot
73966+ - Deny double chrooting
73967+ - Deny sysctl writes in chroot
73968+ - Deny mknod in chroot
73969+ - Deny access to abstract AF_UNIX sockets out of chroot
73970+ - Deny pivot_root in chroot
73971+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
73972+ - /proc restrictions with special GID set to 10 (usually wheel)
73973+ - Address Space Layout Randomization (ASLR)
73974+ - Prevent exploitation of most refcount overflows
73975+ - Bounds checking of copying between the kernel and userland
73976+
73977+config GRKERNSEC_HIGH
73978+ bool "High"
73979+ select GRKERNSEC_LINK
73980+ select GRKERNSEC_FIFO
73981+ select GRKERNSEC_DMESG
73982+ select GRKERNSEC_FORKFAIL
73983+ select GRKERNSEC_TIME
73984+ select GRKERNSEC_SIGNAL
73985+ select GRKERNSEC_CHROOT
73986+ select GRKERNSEC_CHROOT_SHMAT
73987+ select GRKERNSEC_CHROOT_UNIX
73988+ select GRKERNSEC_CHROOT_MOUNT
73989+ select GRKERNSEC_CHROOT_FCHDIR
73990+ select GRKERNSEC_CHROOT_PIVOT
73991+ select GRKERNSEC_CHROOT_DOUBLE
73992+ select GRKERNSEC_CHROOT_CHDIR
73993+ select GRKERNSEC_CHROOT_MKNOD
73994+ select GRKERNSEC_CHROOT_CAPS
73995+ select GRKERNSEC_CHROOT_SYSCTL
73996+ select GRKERNSEC_CHROOT_FINDTASK
73997+ select GRKERNSEC_SYSFS_RESTRICT
73998+ select GRKERNSEC_PROC
73999+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
74000+ select GRKERNSEC_HIDESYM
74001+ select GRKERNSEC_BRUTE
74002+ select GRKERNSEC_PROC_USERGROUP
74003+ select GRKERNSEC_KMEM
74004+ select GRKERNSEC_RESLOG
74005+ select GRKERNSEC_RANDNET
74006+ select GRKERNSEC_PROC_ADD
74007+ select GRKERNSEC_CHROOT_CHMOD
74008+ select GRKERNSEC_CHROOT_NICE
74009+ select GRKERNSEC_SETXID
74010+ select GRKERNSEC_AUDIT_MOUNT
74011+ select GRKERNSEC_MODHARDEN if (MODULES)
74012+ select GRKERNSEC_HARDEN_PTRACE
74013+ select GRKERNSEC_PTRACE_READEXEC
74014+ select GRKERNSEC_VM86 if (X86_32)
74015+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
74016+ select PAX
74017+ select PAX_RANDUSTACK
74018+ select PAX_ASLR
74019+ select PAX_RANDMMAP
74020+ select PAX_NOEXEC
74021+ select PAX_MPROTECT
74022+ select PAX_EI_PAX
74023+ select PAX_PT_PAX_FLAGS
74024+ select PAX_HAVE_ACL_FLAGS
74025+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
74026+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
74027+ select PAX_RANDKSTACK if (X86_TSC && X86)
74028+ select PAX_SEGMEXEC if (X86_32)
74029+ select PAX_PAGEEXEC
74030+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
74031+ select PAX_EMUTRAMP if (PARISC)
74032+ select PAX_EMUSIGRT if (PARISC)
74033+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
74034+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
74035+ select PAX_REFCOUNT if (X86 || SPARC64)
74036+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
74037+ help
74038+ If you say Y here, many of the features of grsecurity will be
74039+ enabled, which will protect you against many kinds of attacks
74040+ against your system. The heightened security comes at a cost
74041+ of an increased chance of incompatibilities with rare software
74042+ on your machine. Since this security level enables PaX, you should
74043+ view <http://pax.grsecurity.net> and read about the PaX
74044+ project. While you are there, download chpax and run it on
74045+ binaries that cause problems with PaX. Also remember that
74046+ since the /proc restrictions are enabled, you must run your
74047+ identd as gid 1001. This security level enables the following
74048+ features in addition to those listed in the low and medium
74049+ security levels:
74050+
74051+ - Additional /proc restrictions
74052+ - Chmod restrictions in chroot
74053+ - No signals, ptrace, or viewing of processes outside of chroot
74054+ - Capability restrictions in chroot
74055+ - Deny fchdir out of chroot
74056+ - Priority restrictions in chroot
74057+ - Segmentation-based implementation of PaX
74058+ - Mprotect restrictions
74059+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
74060+ - Kernel stack randomization
74061+ - Mount/unmount/remount logging
74062+ - Kernel symbol hiding
74063+ - Hardening of module auto-loading
74064+ - Ptrace restrictions
74065+ - Restricted vm86 mode
74066+ - Restricted sysfs/debugfs
74067+ - Active kernel exploit response
74068+
74069+config GRKERNSEC_CUSTOM
74070+ bool "Custom"
74071+ help
74072+ If you say Y here, you will be able to configure every grsecurity
74073+ option, which allows you to enable many more features that aren't
74074+ covered in the basic security levels. These additional features
74075+ include TPE, socket restrictions, and the sysctl system for
74076+ grsecurity. It is advised that you read through the help for
74077+ each option to determine its usefulness in your situation.
74078+
74079+endchoice
74080+
74081+menu "Memory Protections"
74082+depends on GRKERNSEC
74083+
74084+config GRKERNSEC_KMEM
74085+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
74086+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
74087+ help
74088+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
74089+ be written to or read from to modify or leak the contents of the running
74090+ kernel. /dev/port will also not be allowed to be opened. If you have module
74091+ support disabled, enabling this will close up four ways that are
74092+ currently used to insert malicious code into the running kernel.
74093+ Even with all these features enabled, we still highly recommend that
74094+ you use the RBAC system, as it is still possible for an attacker to
74095+ modify the running kernel through privileged I/O granted by ioperm/iopl.
74096+ If you are not using XFree86, you may be able to stop this additional
74097+ case by enabling the 'Disable privileged I/O' option. Though nothing
74098+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
74099+ but only to video memory, which is the only writing we allow in this
74100+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
74101+ not be allowed to mprotect it with PROT_WRITE later.
74102+ It is highly recommended that you say Y here if you meet all the
74103+ conditions above.
74104+
74105+config GRKERNSEC_VM86
74106+ bool "Restrict VM86 mode"
74107+ depends on X86_32
74108+
74109+ help
74110+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
74111+ make use of a special execution mode on 32bit x86 processors called
74112+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
74113+ video cards and will still work with this option enabled. The purpose
74114+ of the option is to prevent exploitation of emulation errors in
74115+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
74116+ Nearly all users should be able to enable this option.
74117+
74118+config GRKERNSEC_IO
74119+ bool "Disable privileged I/O"
74120+ depends on X86
74121+ select RTC_CLASS
74122+ select RTC_INTF_DEV
74123+ select RTC_DRV_CMOS
74124+
74125+ help
74126+ If you say Y here, all ioperm and iopl calls will return an error.
74127+ Ioperm and iopl can be used to modify the running kernel.
74128+ Unfortunately, some programs need this access to operate properly,
74129+ the most notable of which are XFree86 and hwclock. hwclock can be
74130+ remedied by having RTC support in the kernel, so real-time
74131+ clock support is enabled if this option is enabled, to ensure
74132+ that hwclock operates correctly. XFree86 still will not
74133+ operate correctly with this option enabled, so DO NOT CHOOSE Y
74134+ IF YOU USE XFree86. If you use XFree86 and you still want to
74135+ protect your kernel against modification, use the RBAC system.
74136+
74137+config GRKERNSEC_PROC_MEMMAP
74138+ bool "Harden ASLR against information leaks and entropy reduction"
74139+ default y if (PAX_NOEXEC || PAX_ASLR)
74140+ depends on PAX_NOEXEC || PAX_ASLR
74141+ help
74142+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
74143+ give no information about the addresses of its mappings if
74144+ PaX features that rely on random addresses are enabled on the task.
74145+ In addition to sanitizing this information and disabling other
74146+ dangerous sources of information, this option causes reads of sensitive
74147+ /proc/<pid> entries where the file descriptor was opened in a different
74148+ task than the one performing the read. Such attempts are logged.
74149+ This option also limits argv/env strings for suid/sgid binaries
74150+ to 512KB to prevent a complete exhaustion of the stack entropy provided
74151+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
74152+ binaries to prevent alternative mmap layouts from being abused.
74153+
74154+ If you use PaX it is essential that you say Y here as it closes up
74155+ several holes that make full ASLR useless locally.
74156+
74157+config GRKERNSEC_BRUTE
74158+ bool "Deter exploit bruteforcing"
74159+ help
74160+ If you say Y here, attempts to bruteforce exploits against forking
74161+ daemons such as apache or sshd, as well as against suid/sgid binaries
74162+ will be deterred. When a child of a forking daemon is killed by PaX
74163+ or crashes due to an illegal instruction or other suspicious signal,
74164+ the parent process will be delayed 30 seconds upon every subsequent
74165+ fork until the administrator is able to assess the situation and
74166+ restart the daemon.
74167+ In the suid/sgid case, the attempt is logged, the user has all their
74168+ processes terminated, and they are prevented from executing any further
74169+ processes for 15 minutes.
74170+ It is recommended that you also enable signal logging in the auditing
74171+ section so that logs are generated when a process triggers a suspicious
74172+ signal.
74173+ If the sysctl option is enabled, a sysctl option with name
74174+ "deter_bruteforce" is created.
74175+
74176+config GRKERNSEC_MODHARDEN
74177+ bool "Harden module auto-loading"
74178+ depends on MODULES
74179+ help
74180+ If you say Y here, module auto-loading in response to use of some
74181+ feature implemented by an unloaded module will be restricted to
74182+ root users. Enabling this option helps defend against attacks
74183+ by unprivileged users who abuse the auto-loading behavior to
74184+ cause a vulnerable module to load that is then exploited.
74185+
74186+ If this option prevents a legitimate use of auto-loading for a
74187+ non-root user, the administrator can execute modprobe manually
74188+ with the exact name of the module mentioned in the alert log.
74189+ Alternatively, the administrator can add the module to the list
74190+ of modules loaded at boot by modifying init scripts.
74191+
74192+ Modification of init scripts will most likely be needed on
74193+ Ubuntu servers with encrypted home directory support enabled,
74194+ as the first non-root user logging in will cause the ecb(aes),
74195+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
74196+
74197+config GRKERNSEC_HIDESYM
74198+ bool "Hide kernel symbols"
74199+ help
74200+ If you say Y here, getting information on loaded modules, and
74201+ displaying all kernel symbols through a syscall will be restricted
74202+ to users with CAP_SYS_MODULE. For software compatibility reasons,
74203+ /proc/kallsyms will be restricted to the root user. The RBAC
74204+ system can hide that entry even from root.
74205+
74206+ This option also prevents leaking of kernel addresses through
74207+ several /proc entries.
74208+
74209+ Note that this option is only effective provided the following
74210+ conditions are met:
74211+ 1) The kernel using grsecurity is not precompiled by some distribution
74212+ 2) You have also enabled GRKERNSEC_DMESG
74213+ 3) You are using the RBAC system and hiding other files such as your
74214+ kernel image and System.map. Alternatively, enabling this option
74215+ causes the permissions on /boot, /lib/modules, and the kernel
74216+ source directory to change at compile time to prevent
74217+ reading by non-root users.
74218+ If the above conditions are met, this option will aid in providing a
74219+ useful protection against local kernel exploitation of overflows
74220+ and arbitrary read/write vulnerabilities.
74221+
74222+config GRKERNSEC_KERN_LOCKOUT
74223+ bool "Active kernel exploit response"
74224+ depends on X86 || ARM || PPC || SPARC
74225+ help
74226+ If you say Y here, when a PaX alert is triggered due to suspicious
74227+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
74228+ or an OOPs occurs due to bad memory accesses, instead of just
74229+ terminating the offending process (and potentially allowing
74230+ a subsequent exploit from the same user), we will take one of two
74231+ actions:
74232+ If the user was root, we will panic the system
74233+ If the user was non-root, we will log the attempt, terminate
74234+ all processes owned by the user, then prevent them from creating
74235+ any new processes until the system is restarted
74236+ This deters repeated kernel exploitation/bruteforcing attempts
74237+ and is useful for later forensics.
74238+
74239+endmenu
74240+menu "Role Based Access Control Options"
74241+depends on GRKERNSEC
74242+
74243+config GRKERNSEC_RBAC_DEBUG
74244+ bool
74245+
74246+config GRKERNSEC_NO_RBAC
74247+ bool "Disable RBAC system"
74248+ help
74249+ If you say Y here, the /dev/grsec device will be removed from the kernel,
74250+ preventing the RBAC system from being enabled. You should only say Y
74251+ here if you have no intention of using the RBAC system, so as to prevent
74252+ an attacker with root access from misusing the RBAC system to hide files
74253+ and processes when loadable module support and /dev/[k]mem have been
74254+ locked down.
74255+
74256+config GRKERNSEC_ACL_HIDEKERN
74257+ bool "Hide kernel processes"
74258+ help
74259+ If you say Y here, all kernel threads will be hidden to all
74260+ processes but those whose subject has the "view hidden processes"
74261+ flag.
74262+
74263+config GRKERNSEC_ACL_MAXTRIES
74264+ int "Maximum tries before password lockout"
74265+ default 3
74266+ help
74267+ This option enforces the maximum number of times a user can attempt
74268+ to authorize themselves with the grsecurity RBAC system before being
74269+ denied the ability to attempt authorization again for a specified time.
74270+ The lower the number, the harder it will be to brute-force a password.
74271+
74272+config GRKERNSEC_ACL_TIMEOUT
74273+ int "Time to wait after max password tries, in seconds"
74274+ default 30
74275+ help
74276+ This option specifies the time the user must wait after attempting to
74277+ authorize to the RBAC system with the maximum number of invalid
74278+ passwords. The higher the number, the harder it will be to brute-force
74279+ a password.
74280+
74281+endmenu
74282+menu "Filesystem Protections"
74283+depends on GRKERNSEC
74284+
74285+config GRKERNSEC_PROC
74286+ bool "Proc restrictions"
74287+ help
74288+ If you say Y here, the permissions of the /proc filesystem
74289+ will be altered to enhance system security and privacy. You MUST
74290+ choose either a user only restriction or a user and group restriction.
74291+ Depending upon the option you choose, you can either restrict users to
74292+ see only the processes they themselves run, or choose a group that can
74293+ view all processes and files normally restricted to root if you choose
74294+ the "restrict to user only" option. NOTE: If you're running identd or
74295+ ntpd as a non-root user, you will have to run it as the group you
74296+ specify here.
74297+
74298+config GRKERNSEC_PROC_USER
74299+ bool "Restrict /proc to user only"
74300+ depends on GRKERNSEC_PROC
74301+ help
74302+ If you say Y here, non-root users will only be able to view their own
74303+ processes, and restricts them from viewing network-related information,
74304+ and viewing kernel symbol and module information.
74305+
74306+config GRKERNSEC_PROC_USERGROUP
74307+ bool "Allow special group"
74308+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
74309+ help
74310+ If you say Y here, you will be able to select a group that will be
74311+ able to view all processes and network-related information. If you've
74312+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
74313+ remain hidden. This option is useful if you want to run identd as
74314+ a non-root user.
74315+
74316+config GRKERNSEC_PROC_GID
74317+ int "GID for special group"
74318+ depends on GRKERNSEC_PROC_USERGROUP
74319+ default 1001
74320+
74321+config GRKERNSEC_PROC_ADD
74322+ bool "Additional restrictions"
74323+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
74324+ help
74325+ If you say Y here, additional restrictions will be placed on
74326+ /proc that keep normal users from viewing device information and
74327+ slabinfo information that could be useful for exploits.
74328+
74329+config GRKERNSEC_LINK
74330+ bool "Linking restrictions"
74331+ help
74332+ If you say Y here, /tmp race exploits will be prevented, since users
74333+ will no longer be able to follow symlinks owned by other users in
74334+ world-writable +t directories (e.g. /tmp), unless the owner of the
74335+ symlink is the owner of the directory. users will also not be
74336+ able to hardlink to files they do not own. If the sysctl option is
74337+ enabled, a sysctl option with name "linking_restrictions" is created.
74338+
74339+config GRKERNSEC_FIFO
74340+ bool "FIFO restrictions"
74341+ help
74342+ If you say Y here, users will not be able to write to FIFOs they don't
74343+ own in world-writable +t directories (e.g. /tmp), unless the owner of
74344+ the FIFO is the same owner of the directory it's held in. If the sysctl
74345+ option is enabled, a sysctl option with name "fifo_restrictions" is
74346+ created.
74347+
74348+config GRKERNSEC_SYSFS_RESTRICT
74349+ bool "Sysfs/debugfs restriction"
74350+ depends on SYSFS
74351+ help
74352+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
74353+ any filesystem normally mounted under it (e.g. debugfs) will be
74354+ mostly accessible only by root. These filesystems generally provide access
74355+ to hardware and debug information that isn't appropriate for unprivileged
74356+ users of the system. Sysfs and debugfs have also become a large source
74357+ of new vulnerabilities, ranging from infoleaks to local compromise.
74358+ There has been very little oversight with an eye toward security involved
74359+ in adding new exporters of information to these filesystems, so their
74360+ use is discouraged.
74361+ For reasons of compatibility, a few directories have been whitelisted
74362+ for access by non-root users:
74363+ /sys/fs/selinux
74364+ /sys/fs/fuse
74365+ /sys/devices/system/cpu
74366+
74367+config GRKERNSEC_ROFS
74368+ bool "Runtime read-only mount protection"
74369+ help
74370+ If you say Y here, a sysctl option with name "romount_protect" will
74371+ be created. By setting this option to 1 at runtime, filesystems
74372+ will be protected in the following ways:
74373+ * No new writable mounts will be allowed
74374+ * Existing read-only mounts won't be able to be remounted read/write
74375+ * Write operations will be denied on all block devices
74376+ This option acts independently of grsec_lock: once it is set to 1,
74377+ it cannot be turned off. Therefore, please be mindful of the resulting
74378+ behavior if this option is enabled in an init script on a read-only
74379+ filesystem. This feature is mainly intended for secure embedded systems.
74380+
74381+config GRKERNSEC_CHROOT
74382+ bool "Chroot jail restrictions"
74383+ help
74384+ If you say Y here, you will be able to choose several options that will
74385+ make breaking out of a chrooted jail much more difficult. If you
74386+ encounter no software incompatibilities with the following options, it
74387+ is recommended that you enable each one.
74388+
74389+config GRKERNSEC_CHROOT_MOUNT
74390+ bool "Deny mounts"
74391+ depends on GRKERNSEC_CHROOT
74392+ help
74393+ If you say Y here, processes inside a chroot will not be able to
74394+ mount or remount filesystems. If the sysctl option is enabled, a
74395+ sysctl option with name "chroot_deny_mount" is created.
74396+
74397+config GRKERNSEC_CHROOT_DOUBLE
74398+ bool "Deny double-chroots"
74399+ depends on GRKERNSEC_CHROOT
74400+ help
74401+ If you say Y here, processes inside a chroot will not be able to chroot
74402+ again outside the chroot. This is a widely used method of breaking
74403+ out of a chroot jail and should not be allowed. If the sysctl
74404+ option is enabled, a sysctl option with name
74405+ "chroot_deny_chroot" is created.
74406+
74407+config GRKERNSEC_CHROOT_PIVOT
74408+ bool "Deny pivot_root in chroot"
74409+ depends on GRKERNSEC_CHROOT
74410+ help
74411+ If you say Y here, processes inside a chroot will not be able to use
74412+ a function called pivot_root() that was introduced in Linux 2.3.41. It
74413+ works similar to chroot in that it changes the root filesystem. This
74414+ function could be misused in a chrooted process to attempt to break out
74415+ of the chroot, and therefore should not be allowed. If the sysctl
74416+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
74417+ created.
74418+
74419+config GRKERNSEC_CHROOT_CHDIR
74420+ bool "Enforce chdir(\"/\") on all chroots"
74421+ depends on GRKERNSEC_CHROOT
74422+ help
74423+ If you say Y here, the current working directory of all newly-chrooted
 74424+ applications will be set to the root directory of the chroot.
74425+ The man page on chroot(2) states:
74426+ Note that this call does not change the current working
74427+ directory, so that `.' can be outside the tree rooted at
74428+ `/'. In particular, the super-user can escape from a
74429+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
74430+
74431+ It is recommended that you say Y here, since it's not known to break
74432+ any software. If the sysctl option is enabled, a sysctl option with
74433+ name "chroot_enforce_chdir" is created.
74434+
74435+config GRKERNSEC_CHROOT_CHMOD
74436+ bool "Deny (f)chmod +s"
74437+ depends on GRKERNSEC_CHROOT
74438+ help
74439+ If you say Y here, processes inside a chroot will not be able to chmod
74440+ or fchmod files to make them have suid or sgid bits. This protects
74441+ against another published method of breaking a chroot. If the sysctl
74442+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
74443+ created.
74444+
74445+config GRKERNSEC_CHROOT_FCHDIR
74446+ bool "Deny fchdir out of chroot"
74447+ depends on GRKERNSEC_CHROOT
74448+ help
74449+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
74450+ to a file descriptor of the chrooting process that points to a directory
74451+ outside the filesystem will be stopped. If the sysctl option
74452+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
74453+
74454+config GRKERNSEC_CHROOT_MKNOD
74455+ bool "Deny mknod"
74456+ depends on GRKERNSEC_CHROOT
74457+ help
74458+ If you say Y here, processes inside a chroot will not be allowed to
74459+ mknod. The problem with using mknod inside a chroot is that it
74460+ would allow an attacker to create a device entry that is the same
74461+ as one on the physical root of your system, which could range from
74462+ anything from the console device to a device for your harddrive (which
74463+ they could then use to wipe the drive or steal data). It is recommended
74464+ that you say Y here, unless you run into software incompatibilities.
74465+ If the sysctl option is enabled, a sysctl option with name
74466+ "chroot_deny_mknod" is created.
74467+
74468+config GRKERNSEC_CHROOT_SHMAT
74469+ bool "Deny shmat() out of chroot"
74470+ depends on GRKERNSEC_CHROOT
74471+ help
74472+ If you say Y here, processes inside a chroot will not be able to attach
74473+ to shared memory segments that were created outside of the chroot jail.
74474+ It is recommended that you say Y here. If the sysctl option is enabled,
74475+ a sysctl option with name "chroot_deny_shmat" is created.
74476+
74477+config GRKERNSEC_CHROOT_UNIX
74478+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
74479+ depends on GRKERNSEC_CHROOT
74480+ help
74481+ If you say Y here, processes inside a chroot will not be able to
74482+ connect to abstract (meaning not belonging to a filesystem) Unix
74483+ domain sockets that were bound outside of a chroot. It is recommended
74484+ that you say Y here. If the sysctl option is enabled, a sysctl option
74485+ with name "chroot_deny_unix" is created.
74486+
74487+config GRKERNSEC_CHROOT_FINDTASK
74488+ bool "Protect outside processes"
74489+ depends on GRKERNSEC_CHROOT
74490+ help
74491+ If you say Y here, processes inside a chroot will not be able to
74492+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
74493+ getsid, or view any process outside of the chroot. If the sysctl
74494+ option is enabled, a sysctl option with name "chroot_findtask" is
74495+ created.
74496+
74497+config GRKERNSEC_CHROOT_NICE
74498+ bool "Restrict priority changes"
74499+ depends on GRKERNSEC_CHROOT
74500+ help
74501+ If you say Y here, processes inside a chroot will not be able to raise
74502+ the priority of processes in the chroot, or alter the priority of
74503+ processes outside the chroot. This provides more security than simply
74504+ removing CAP_SYS_NICE from the process' capability set. If the
74505+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
74506+ is created.
74507+
74508+config GRKERNSEC_CHROOT_SYSCTL
74509+ bool "Deny sysctl writes"
74510+ depends on GRKERNSEC_CHROOT
74511+ help
74512+ If you say Y here, an attacker in a chroot will not be able to
74513+ write to sysctl entries, either by sysctl(2) or through a /proc
74514+ interface. It is strongly recommended that you say Y here. If the
74515+ sysctl option is enabled, a sysctl option with name
74516+ "chroot_deny_sysctl" is created.
74517+
74518+config GRKERNSEC_CHROOT_CAPS
74519+ bool "Capability restrictions"
74520+ depends on GRKERNSEC_CHROOT
74521+ help
74522+ If you say Y here, the capabilities on all processes within a
74523+ chroot jail will be lowered to stop module insertion, raw i/o,
74524+ system and net admin tasks, rebooting the system, modifying immutable
74525+ files, modifying IPC owned by another, and changing the system time.
74526+ This is left an option because it can break some apps. Disable this
74527+ if your chrooted apps are having problems performing those kinds of
74528+ tasks. If the sysctl option is enabled, a sysctl option with
74529+ name "chroot_caps" is created.
74530+
74531+endmenu
74532+menu "Kernel Auditing"
74533+depends on GRKERNSEC
74534+
74535+config GRKERNSEC_AUDIT_GROUP
74536+ bool "Single group for auditing"
74537+ help
74538+ If you say Y here, the exec, chdir, and (un)mount logging features
74539+ will only operate on a group you specify. This option is recommended
74540+ if you only want to watch certain users instead of having a large
74541+ amount of logs from the entire system. If the sysctl option is enabled,
74542+ a sysctl option with name "audit_group" is created.
74543+
74544+config GRKERNSEC_AUDIT_GID
74545+ int "GID for auditing"
74546+ depends on GRKERNSEC_AUDIT_GROUP
74547+ default 1007
74548+
74549+config GRKERNSEC_EXECLOG
74550+ bool "Exec logging"
74551+ help
74552+ If you say Y here, all execve() calls will be logged (since the
74553+ other exec*() calls are frontends to execve(), all execution
74554+ will be logged). Useful for shell-servers that like to keep track
74555+ of their users. If the sysctl option is enabled, a sysctl option with
74556+ name "exec_logging" is created.
74557+ WARNING: This option when enabled will produce a LOT of logs, especially
74558+ on an active system.
74559+
74560+config GRKERNSEC_RESLOG
74561+ bool "Resource logging"
74562+ help
74563+ If you say Y here, all attempts to overstep resource limits will
74564+ be logged with the resource name, the requested size, and the current
74565+ limit. It is highly recommended that you say Y here. If the sysctl
74566+ option is enabled, a sysctl option with name "resource_logging" is
74567+ created. If the RBAC system is enabled, the sysctl value is ignored.
74568+
74569+config GRKERNSEC_CHROOT_EXECLOG
74570+ bool "Log execs within chroot"
74571+ help
74572+ If you say Y here, all executions inside a chroot jail will be logged
74573+ to syslog. This can cause a large amount of logs if certain
74574+ applications (eg. djb's daemontools) are installed on the system, and
74575+ is therefore left as an option. If the sysctl option is enabled, a
74576+ sysctl option with name "chroot_execlog" is created.
74577+
74578+config GRKERNSEC_AUDIT_PTRACE
74579+ bool "Ptrace logging"
74580+ help
74581+ If you say Y here, all attempts to attach to a process via ptrace
74582+ will be logged. If the sysctl option is enabled, a sysctl option
74583+ with name "audit_ptrace" is created.
74584+
74585+config GRKERNSEC_AUDIT_CHDIR
74586+ bool "Chdir logging"
74587+ help
74588+ If you say Y here, all chdir() calls will be logged. If the sysctl
74589+ option is enabled, a sysctl option with name "audit_chdir" is created.
74590+
74591+config GRKERNSEC_AUDIT_MOUNT
74592+ bool "(Un)Mount logging"
74593+ help
74594+ If you say Y here, all mounts and unmounts will be logged. If the
74595+ sysctl option is enabled, a sysctl option with name "audit_mount" is
74596+ created.
74597+
74598+config GRKERNSEC_SIGNAL
74599+ bool "Signal logging"
74600+ help
74601+ If you say Y here, certain important signals will be logged, such as
 74602+ SIGSEGV, which will as a result inform you of when an error in a program
74603+ occurred, which in some cases could mean a possible exploit attempt.
74604+ If the sysctl option is enabled, a sysctl option with name
74605+ "signal_logging" is created.
74606+
74607+config GRKERNSEC_FORKFAIL
74608+ bool "Fork failure logging"
74609+ help
74610+ If you say Y here, all failed fork() attempts will be logged.
74611+ This could suggest a fork bomb, or someone attempting to overstep
74612+ their process limit. If the sysctl option is enabled, a sysctl option
74613+ with name "forkfail_logging" is created.
74614+
74615+config GRKERNSEC_TIME
74616+ bool "Time change logging"
74617+ help
74618+ If you say Y here, any changes of the system clock will be logged.
74619+ If the sysctl option is enabled, a sysctl option with name
74620+ "timechange_logging" is created.
74621+
74622+config GRKERNSEC_PROC_IPADDR
74623+ bool "/proc/<pid>/ipaddr support"
74624+ help
74625+ If you say Y here, a new entry will be added to each /proc/<pid>
74626+ directory that contains the IP address of the person using the task.
74627+ The IP is carried across local TCP and AF_UNIX stream sockets.
74628+ This information can be useful for IDS/IPSes to perform remote response
74629+ to a local attack. The entry is readable by only the owner of the
74630+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
74631+ the RBAC system), and thus does not create privacy concerns.
74632+
74633+config GRKERNSEC_RWXMAP_LOG
74634+ bool 'Denied RWX mmap/mprotect logging'
74635+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
74636+ help
74637+ If you say Y here, calls to mmap() and mprotect() with explicit
74638+ usage of PROT_WRITE and PROT_EXEC together will be logged when
74639+ denied by the PAX_MPROTECT feature. If the sysctl option is
74640+ enabled, a sysctl option with name "rwxmap_logging" is created.
74641+
74642+config GRKERNSEC_AUDIT_TEXTREL
74643+ bool 'ELF text relocations logging (READ HELP)'
74644+ depends on PAX_MPROTECT
74645+ help
74646+ If you say Y here, text relocations will be logged with the filename
74647+ of the offending library or binary. The purpose of the feature is
74648+ to help Linux distribution developers get rid of libraries and
74649+ binaries that need text relocations which hinder the future progress
74650+ of PaX. Only Linux distribution developers should say Y here, and
74651+ never on a production machine, as this option creates an information
74652+ leak that could aid an attacker in defeating the randomization of
74653+ a single memory region. If the sysctl option is enabled, a sysctl
74654+ option with name "audit_textrel" is created.
74655+
74656+endmenu
74657+
74658+menu "Executable Protections"
74659+depends on GRKERNSEC
74660+
74661+config GRKERNSEC_DMESG
74662+ bool "Dmesg(8) restriction"
74663+ help
74664+ If you say Y here, non-root users will not be able to use dmesg(8)
74665+ to view up to the last 4kb of messages in the kernel's log buffer.
74666+ The kernel's log buffer often contains kernel addresses and other
74667+ identifying information useful to an attacker in fingerprinting a
74668+ system for a targeted exploit.
74669+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
74670+ created.
74671+
74672+config GRKERNSEC_HARDEN_PTRACE
74673+ bool "Deter ptrace-based process snooping"
74674+ help
74675+ If you say Y here, TTY sniffers and other malicious monitoring
74676+ programs implemented through ptrace will be defeated. If you
74677+ have been using the RBAC system, this option has already been
74678+ enabled for several years for all users, with the ability to make
74679+ fine-grained exceptions.
74680+
74681+ This option only affects the ability of non-root users to ptrace
74682+ processes that are not a descendent of the ptracing process.
74683+ This means that strace ./binary and gdb ./binary will still work,
74684+ but attaching to arbitrary processes will not. If the sysctl
74685+ option is enabled, a sysctl option with name "harden_ptrace" is
74686+ created.
74687+
74688+config GRKERNSEC_PTRACE_READEXEC
74689+ bool "Require read access to ptrace sensitive binaries"
74690+ help
74691+ If you say Y here, unprivileged users will not be able to ptrace unreadable
74692+ binaries. This option is useful in environments that
74693+ remove the read bits (e.g. file mode 4711) from suid binaries to
74694+ prevent infoleaking of their contents. This option adds
74695+ consistency to the use of that file mode, as the binary could normally
74696+ be read out when run without privileges while ptracing.
74697+
74698+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
74699+ is created.
74700+
74701+config GRKERNSEC_SETXID
74702+ bool "Enforce consistent multithreaded privileges"
74703+ help
74704+ If you say Y here, a change from a root uid to a non-root uid
74705+ in a multithreaded application will cause the resulting uids,
74706+ gids, supplementary groups, and capabilities in that thread
74707+ to be propagated to the other threads of the process. In most
74708+ cases this is unnecessary, as glibc will emulate this behavior
74709+ on behalf of the application. Other libcs do not act in the
74710+ same way, allowing the other threads of the process to continue
74711+ running with root privileges. If the sysctl option is enabled,
74712+ a sysctl option with name "consistent_setxid" is created.
74713+
74714+config GRKERNSEC_TPE
74715+ bool "Trusted Path Execution (TPE)"
74716+ help
74717+ If you say Y here, you will be able to choose a gid to add to the
74718+ supplementary groups of users you want to mark as "untrusted."
74719+ These users will not be able to execute any files that are not in
74720+ root-owned directories writable only by root. If the sysctl option
74721+ is enabled, a sysctl option with name "tpe" is created.
74722+
74723+config GRKERNSEC_TPE_ALL
74724+ bool "Partially restrict all non-root users"
74725+ depends on GRKERNSEC_TPE
74726+ help
74727+ If you say Y here, all non-root users will be covered under
74728+ a weaker TPE restriction. This is separate from, and in addition to,
74729+ the main TPE options that you have selected elsewhere. Thus, if a
74730+ "trusted" GID is chosen, this restriction applies to even that GID.
74731+ Under this restriction, all non-root users will only be allowed to
74732+ execute files in directories they own that are not group or
74733+ world-writable, or in directories owned by root and writable only by
74734+ root. If the sysctl option is enabled, a sysctl option with name
74735+ "tpe_restrict_all" is created.
74736+
74737+config GRKERNSEC_TPE_INVERT
74738+ bool "Invert GID option"
74739+ depends on GRKERNSEC_TPE
74740+ help
74741+ If you say Y here, the group you specify in the TPE configuration will
74742+ decide what group TPE restrictions will be *disabled* for. This
74743+ option is useful if you want TPE restrictions to be applied to most
74744+ users on the system. If the sysctl option is enabled, a sysctl option
74745+ with name "tpe_invert" is created. Unlike other sysctl options, this
74746+ entry will default to on for backward-compatibility.
74747+
74748+config GRKERNSEC_TPE_GID
74749+ int "GID for untrusted users"
74750+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
74751+ default 1005
74752+ help
74753+ Setting this GID determines what group TPE restrictions will be
74754+ *enabled* for. If the sysctl option is enabled, a sysctl option
74755+ with name "tpe_gid" is created.
74756+
74757+config GRKERNSEC_TPE_GID
74758+ int "GID for trusted users"
74759+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
74760+ default 1005
74761+ help
74762+ Setting this GID determines what group TPE restrictions will be
74763+ *disabled* for. If the sysctl option is enabled, a sysctl option
74764+ with name "tpe_gid" is created.
74765+
74766+endmenu
74767+menu "Network Protections"
74768+depends on GRKERNSEC
74769+
74770+config GRKERNSEC_RANDNET
74771+ bool "Larger entropy pools"
74772+ help
74773+ If you say Y here, the entropy pools used for many features of Linux
74774+ and grsecurity will be doubled in size. Since several grsecurity
74775+ features use additional randomness, it is recommended that you say Y
74776+ here. Saying Y here has a similar effect as modifying
74777+ /proc/sys/kernel/random/poolsize.
74778+
74779+config GRKERNSEC_BLACKHOLE
74780+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
74781+ depends on NET
74782+ help
74783+ If you say Y here, neither TCP resets nor ICMP
74784+ destination-unreachable packets will be sent in response to packets
74785+ sent to ports for which no associated listening process exists.
74786+ This feature supports both IPV4 and IPV6 and exempts the
74787+ loopback interface from blackholing. Enabling this feature
74788+ makes a host more resilient to DoS attacks and reduces network
74789+ visibility against scanners.
74790+
74791+ The blackhole feature as-implemented is equivalent to the FreeBSD
74792+ blackhole feature, as it prevents RST responses to all packets, not
74793+ just SYNs. Under most application behavior this causes no
74794+ problems, but applications (like haproxy) may not close certain
74795+ connections in a way that cleanly terminates them on the remote
74796+ end, leaving the remote host in LAST_ACK state. Because of this
74797+ side-effect and to prevent intentional LAST_ACK DoSes, this
74798+ feature also adds automatic mitigation against such attacks.
74799+ The mitigation drastically reduces the amount of time a socket
74800+ can spend in LAST_ACK state. If you're using haproxy and not
74801+ all servers it connects to have this option enabled, consider
74802+ disabling this feature on the haproxy host.
74803+
74804+ If the sysctl option is enabled, two sysctl options with names
74805+ "ip_blackhole" and "lastack_retries" will be created.
74806+ While "ip_blackhole" takes the standard zero/non-zero on/off
74807+ toggle, "lastack_retries" uses the same kinds of values as
74808+ "tcp_retries1" and "tcp_retries2". The default value of 4
74809+ prevents a socket from lasting more than 45 seconds in LAST_ACK
74810+ state.
74811+
74812+config GRKERNSEC_SOCKET
74813+ bool "Socket restrictions"
74814+ depends on NET
74815+ help
74816+ If you say Y here, you will be able to choose from several options.
74817+ If you assign a GID on your system and add it to the supplementary
74818+ groups of users you want to restrict socket access to, this patch
74819+ will perform up to three things, based on the option(s) you choose.
74820+
74821+config GRKERNSEC_SOCKET_ALL
74822+ bool "Deny any sockets to group"
74823+ depends on GRKERNSEC_SOCKET
74824+ help
74825+ If you say Y here, you will be able to choose a GID of whose users will
74826+ be unable to connect to other hosts from your machine or run server
74827+ applications from your machine. If the sysctl option is enabled, a
74828+ sysctl option with name "socket_all" is created.
74829+
74830+config GRKERNSEC_SOCKET_ALL_GID
74831+ int "GID to deny all sockets for"
74832+ depends on GRKERNSEC_SOCKET_ALL
74833+ default 1004
74834+ help
74835+ Here you can choose the GID to disable socket access for. Remember to
74836+ add the users you want socket access disabled for to the GID
74837+ specified here. If the sysctl option is enabled, a sysctl option
74838+ with name "socket_all_gid" is created.
74839+
74840+config GRKERNSEC_SOCKET_CLIENT
74841+ bool "Deny client sockets to group"
74842+ depends on GRKERNSEC_SOCKET
74843+ help
74844+ If you say Y here, you will be able to choose a GID of whose users will
74845+ be unable to connect to other hosts from your machine, but will be
74846+ able to run servers. If this option is enabled, all users in the group
74847+ you specify will have to use passive mode when initiating ftp transfers
74848+ from the shell on your machine. If the sysctl option is enabled, a
74849+ sysctl option with name "socket_client" is created.
74850+
74851+config GRKERNSEC_SOCKET_CLIENT_GID
74852+ int "GID to deny client sockets for"
74853+ depends on GRKERNSEC_SOCKET_CLIENT
74854+ default 1003
74855+ help
74856+ Here you can choose the GID to disable client socket access for.
74857+ Remember to add the users you want client socket access disabled for to
74858+ the GID specified here. If the sysctl option is enabled, a sysctl
74859+ option with name "socket_client_gid" is created.
74860+
74861+config GRKERNSEC_SOCKET_SERVER
74862+ bool "Deny server sockets to group"
74863+ depends on GRKERNSEC_SOCKET
74864+ help
74865+ If you say Y here, you will be able to choose a GID of whose users will
74866+ be unable to run server applications from your machine. If the sysctl
74867+ option is enabled, a sysctl option with name "socket_server" is created.
74868+
74869+config GRKERNSEC_SOCKET_SERVER_GID
74870+ int "GID to deny server sockets for"
74871+ depends on GRKERNSEC_SOCKET_SERVER
74872+ default 1002
74873+ help
74874+ Here you can choose the GID to disable server socket access for.
74875+ Remember to add the users you want server socket access disabled for to
74876+ the GID specified here. If the sysctl option is enabled, a sysctl
74877+ option with name "socket_server_gid" is created.
74878+
74879+endmenu
74880+menu "Sysctl support"
74881+depends on GRKERNSEC && SYSCTL
74882+
74883+config GRKERNSEC_SYSCTL
74884+ bool "Sysctl support"
74885+ help
74886+ If you say Y here, you will be able to change the options that
74887+ grsecurity runs with at bootup, without having to recompile your
74888+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
74889+ to enable (1) or disable (0) various features. All the sysctl entries
74890+ are mutable until the "grsec_lock" entry is set to a non-zero value.
74891+ All features enabled in the kernel configuration are disabled at boot
74892+ if you do not say Y to the "Turn on features by default" option.
74893+ All options should be set at startup, and the grsec_lock entry should
74894+ be set to a non-zero value after all the options are set.
74895+ *THIS IS EXTREMELY IMPORTANT*
74896+
74897+config GRKERNSEC_SYSCTL_DISTRO
74898+ bool "Extra sysctl support for distro makers (READ HELP)"
74899+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
74900+ help
74901+ If you say Y here, additional sysctl options will be created
74902+ for features that affect processes running as root. Therefore,
74903+ it is critical when using this option that the grsec_lock entry be
74904+ enabled after boot. Only distros with prebuilt kernel packages
74905+ with this option enabled that can ensure grsec_lock is enabled
74906+ after boot should use this option.
74907+ *Failure to set grsec_lock after boot makes all grsec features
74908+ this option covers useless*
74909+
74910+ Currently this option creates the following sysctl entries:
74911+ "Disable Privileged I/O": "disable_priv_io"
74912+
74913+config GRKERNSEC_SYSCTL_ON
74914+ bool "Turn on features by default"
74915+ depends on GRKERNSEC_SYSCTL
74916+ help
74917+ If you say Y here, instead of having all features enabled in the
74918+ kernel configuration disabled at boot time, the features will be
74919+ enabled at boot time. It is recommended you say Y here unless
74920+ there is some reason you would want all sysctl-tunable features to
74921+ be disabled by default. As mentioned elsewhere, it is important
74922+ to enable the grsec_lock entry once you have finished modifying
74923+ the sysctl entries.
74924+
74925+endmenu
74926+menu "Logging Options"
74927+depends on GRKERNSEC
74928+
74929+config GRKERNSEC_FLOODTIME
74930+ int "Seconds in between log messages (minimum)"
74931+ default 10
74932+ help
74933+ This option allows you to enforce the number of seconds between
74934+ grsecurity log messages. The default should be suitable for most
74935+ people, however, if you choose to change it, choose a value small enough
74936+ to allow informative logs to be produced, but large enough to
74937+ prevent flooding.
74938+
74939+config GRKERNSEC_FLOODBURST
74940+ int "Number of messages in a burst (maximum)"
74941+ default 6
74942+ help
74943+ This option allows you to choose the maximum number of messages allowed
74944+ within the flood time interval you chose in a separate option. The
74945+ default should be suitable for most people, however if you find that
74946+ many of your logs are being interpreted as flooding, you may want to
74947+ raise this value.
74948+
74949+endmenu
74950+
74951+endmenu
74952diff --git a/grsecurity/Makefile b/grsecurity/Makefile
74953new file mode 100644
74954index 0000000..1b9afa9
74955--- /dev/null
74956+++ b/grsecurity/Makefile
74957@@ -0,0 +1,38 @@
74958+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
74959+# during 2001-2009 it has been completely redesigned by Brad Spengler
74960+# into an RBAC system
74961+#
74962+# All code in this directory and various hooks inserted throughout the kernel
74963+# are copyright Brad Spengler - Open Source Security, Inc., and released
74964+# under the GPL v2 or higher
74965+
74966+KBUILD_CFLAGS += -Werror
74967+
74968+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
74969+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
74970+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
74971+
74972+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
74973+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
74974+ gracl_learn.o grsec_log.o
74975+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
74976+
74977+ifdef CONFIG_NET
74978+obj-y += grsec_sock.o
74979+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
74980+endif
74981+
74982+ifndef CONFIG_GRKERNSEC
74983+obj-y += grsec_disabled.o
74984+endif
74985+
74986+ifdef CONFIG_GRKERNSEC_HIDESYM
74987+extra-y := grsec_hidesym.o
74988+$(obj)/grsec_hidesym.o:
74989+ @-chmod -f 500 /boot
74990+ @-chmod -f 500 /lib/modules
74991+ @-chmod -f 500 /lib64/modules
74992+ @-chmod -f 500 /lib32/modules
74993+ @-chmod -f 700 .
74994+ @echo ' grsec: protected kernel image paths'
74995+endif
74996diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
74997new file mode 100644
74998index 0000000..dc4812b
74999--- /dev/null
75000+++ b/grsecurity/gracl.c
75001@@ -0,0 +1,4148 @@
75002+#include <linux/kernel.h>
75003+#include <linux/module.h>
75004+#include <linux/sched.h>
75005+#include <linux/mm.h>
75006+#include <linux/file.h>
75007+#include <linux/fs.h>
75008+#include <linux/namei.h>
75009+#include <linux/mount.h>
75010+#include <linux/tty.h>
75011+#include <linux/proc_fs.h>
75012+#include <linux/smp_lock.h>
75013+#include <linux/slab.h>
75014+#include <linux/vmalloc.h>
75015+#include <linux/types.h>
75016+#include <linux/sysctl.h>
75017+#include <linux/netdevice.h>
75018+#include <linux/ptrace.h>
75019+#include <linux/gracl.h>
75020+#include <linux/gralloc.h>
75021+#include <linux/security.h>
75022+#include <linux/grinternal.h>
75023+#include <linux/pid_namespace.h>
75024+#include <linux/fdtable.h>
75025+#include <linux/percpu.h>
75026+
75027+#include <asm/uaccess.h>
75028+#include <asm/errno.h>
75029+#include <asm/mman.h>
75030+
75031+static struct acl_role_db acl_role_set;
75032+static struct name_db name_set;
75033+static struct inodev_db inodev_set;
75034+
75035+/* for keeping track of userspace pointers used for subjects, so we
75036+ can share references in the kernel as well
75037+*/
75038+
75039+static struct dentry *real_root;
75040+static struct vfsmount *real_root_mnt;
75041+
75042+static struct acl_subj_map_db subj_map_set;
75043+
75044+static struct acl_role_label *default_role;
75045+
75046+static struct acl_role_label *role_list;
75047+
75048+static u16 acl_sp_role_value;
75049+
75050+extern char *gr_shared_page[4];
75051+static DEFINE_MUTEX(gr_dev_mutex);
75052+DEFINE_RWLOCK(gr_inode_lock);
75053+
75054+struct gr_arg *gr_usermode;
75055+
75056+static unsigned int gr_status __read_only = GR_STATUS_INIT;
75057+
75058+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
75059+extern void gr_clear_learn_entries(void);
75060+
75061+#ifdef CONFIG_GRKERNSEC_RESLOG
75062+extern void gr_log_resource(const struct task_struct *task,
75063+ const int res, const unsigned long wanted, const int gt);
75064+#endif
75065+
75066+unsigned char *gr_system_salt;
75067+unsigned char *gr_system_sum;
75068+
75069+static struct sprole_pw **acl_special_roles = NULL;
75070+static __u16 num_sprole_pws = 0;
75071+
75072+static struct acl_role_label *kernel_role = NULL;
75073+
75074+static unsigned int gr_auth_attempts = 0;
75075+static unsigned long gr_auth_expires = 0UL;
75076+
75077+#ifdef CONFIG_NET
75078+extern struct vfsmount *sock_mnt;
75079+#endif
75080+extern struct vfsmount *pipe_mnt;
75081+extern struct vfsmount *shm_mnt;
75082+#ifdef CONFIG_HUGETLBFS
75083+extern struct vfsmount *hugetlbfs_vfsmount;
75084+#endif
75085+
75086+static struct acl_object_label *fakefs_obj_rw;
75087+static struct acl_object_label *fakefs_obj_rwx;
75088+
75089+extern int gr_init_uidset(void);
75090+extern void gr_free_uidset(void);
75091+extern void gr_remove_uid(uid_t uid);
75092+extern int gr_find_uid(uid_t uid);
75093+
75094+__inline__ int
75095+gr_acl_is_enabled(void)
75096+{
75097+ return (gr_status & GR_READY);
75098+}
75099+
75100+#ifdef CONFIG_BTRFS_FS
75101+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
75102+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
75103+#endif
75104+
75105+static inline dev_t __get_dev(const struct dentry *dentry)
75106+{
75107+#ifdef CONFIG_BTRFS_FS
75108+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
75109+ return get_btrfs_dev_from_inode(dentry->d_inode);
75110+ else
75111+#endif
75112+ return dentry->d_inode->i_sb->s_dev;
75113+}
75114+
75115+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
75116+{
75117+ return __get_dev(dentry);
75118+}
75119+
75120+static char gr_task_roletype_to_char(struct task_struct *task)
75121+{
75122+ switch (task->role->roletype &
75123+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
75124+ GR_ROLE_SPECIAL)) {
75125+ case GR_ROLE_DEFAULT:
75126+ return 'D';
75127+ case GR_ROLE_USER:
75128+ return 'U';
75129+ case GR_ROLE_GROUP:
75130+ return 'G';
75131+ case GR_ROLE_SPECIAL:
75132+ return 'S';
75133+ }
75134+
75135+ return 'X';
75136+}
75137+
75138+char gr_roletype_to_char(void)
75139+{
75140+ return gr_task_roletype_to_char(current);
75141+}
75142+
75143+__inline__ int
75144+gr_acl_tpe_check(void)
75145+{
75146+ if (unlikely(!(gr_status & GR_READY)))
75147+ return 0;
75148+ if (current->role->roletype & GR_ROLE_TPE)
75149+ return 1;
75150+ else
75151+ return 0;
75152+}
75153+
75154+int
75155+gr_handle_rawio(const struct inode *inode)
75156+{
75157+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
75158+ if (inode && S_ISBLK(inode->i_mode) &&
75159+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
75160+ !capable(CAP_SYS_RAWIO))
75161+ return 1;
75162+#endif
75163+ return 0;
75164+}
75165+
75166+static int
75167+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
75168+{
75169+ if (likely(lena != lenb))
75170+ return 0;
75171+
75172+ return !memcmp(a, b, lena);
75173+}
75174+
75175+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
75176+{
75177+ *buflen -= namelen;
75178+ if (*buflen < 0)
75179+ return -ENAMETOOLONG;
75180+ *buffer -= namelen;
75181+ memcpy(*buffer, str, namelen);
75182+ return 0;
75183+}
75184+
75185+/* this must be called with vfsmount_lock and dcache_lock held */
75186+
75187+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75188+ struct dentry *root, struct vfsmount *rootmnt,
75189+ char *buffer, int buflen)
75190+{
75191+ char * end = buffer+buflen;
75192+ char * retval;
75193+ int namelen;
75194+
75195+ *--end = '\0';
75196+ buflen--;
75197+
75198+ if (buflen < 1)
75199+ goto Elong;
75200+ /* Get '/' right */
75201+ retval = end-1;
75202+ *retval = '/';
75203+
75204+ for (;;) {
75205+ struct dentry * parent;
75206+
75207+ if (dentry == root && vfsmnt == rootmnt)
75208+ break;
75209+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
75210+ /* Global root? */
75211+ if (vfsmnt->mnt_parent == vfsmnt)
75212+ goto global_root;
75213+ dentry = vfsmnt->mnt_mountpoint;
75214+ vfsmnt = vfsmnt->mnt_parent;
75215+ continue;
75216+ }
75217+ parent = dentry->d_parent;
75218+ prefetch(parent);
75219+ namelen = dentry->d_name.len;
75220+ buflen -= namelen + 1;
75221+ if (buflen < 0)
75222+ goto Elong;
75223+ end -= namelen;
75224+ memcpy(end, dentry->d_name.name, namelen);
75225+ *--end = '/';
75226+ retval = end;
75227+ dentry = parent;
75228+ }
75229+
75230+out:
75231+ return retval;
75232+
75233+global_root:
75234+ namelen = dentry->d_name.len;
75235+ buflen -= namelen;
75236+ if (buflen < 0)
75237+ goto Elong;
75238+ retval -= namelen-1; /* hit the slash */
75239+ memcpy(retval, dentry->d_name.name, namelen);
75240+ goto out;
75241+Elong:
75242+ retval = ERR_PTR(-ENAMETOOLONG);
75243+ goto out;
75244+}
75245+
75246+static char *
75247+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75248+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
75249+{
75250+ char *retval;
75251+
75252+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
75253+ if (unlikely(IS_ERR(retval)))
75254+ retval = strcpy(buf, "<path too long>");
75255+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
75256+ retval[1] = '\0';
75257+
75258+ return retval;
75259+}
75260+
75261+static char *
75262+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75263+ char *buf, int buflen)
75264+{
75265+ char *res;
75266+
75267+ /* we can use real_root, real_root_mnt, because this is only called
75268+ by the RBAC system */
75269+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
75270+
75271+ return res;
75272+}
75273+
75274+static char *
75275+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75276+ char *buf, int buflen)
75277+{
75278+ char *res;
75279+ struct dentry *root;
75280+ struct vfsmount *rootmnt;
75281+ struct task_struct *reaper = &init_task;
75282+
75283+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
75284+ read_lock(&reaper->fs->lock);
75285+ root = dget(reaper->fs->root.dentry);
75286+ rootmnt = mntget(reaper->fs->root.mnt);
75287+ read_unlock(&reaper->fs->lock);
75288+
75289+ spin_lock(&dcache_lock);
75290+ spin_lock(&vfsmount_lock);
75291+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
75292+ spin_unlock(&vfsmount_lock);
75293+ spin_unlock(&dcache_lock);
75294+
75295+ dput(root);
75296+ mntput(rootmnt);
75297+ return res;
75298+}
75299+
75300+static char *
75301+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75302+{
75303+ char *ret;
75304+ spin_lock(&dcache_lock);
75305+ spin_lock(&vfsmount_lock);
75306+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75307+ PAGE_SIZE);
75308+ spin_unlock(&vfsmount_lock);
75309+ spin_unlock(&dcache_lock);
75310+ return ret;
75311+}
75312+
75313+static char *
75314+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75315+{
75316+ char *ret;
75317+ char *buf;
75318+ int buflen;
75319+
75320+ spin_lock(&dcache_lock);
75321+ spin_lock(&vfsmount_lock);
75322+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
75323+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
75324+ buflen = (int)(ret - buf);
75325+ if (buflen >= 5)
75326+ prepend(&ret, &buflen, "/proc", 5);
75327+ else
75328+ ret = strcpy(buf, "<path too long>");
75329+ spin_unlock(&vfsmount_lock);
75330+ spin_unlock(&dcache_lock);
75331+ return ret;
75332+}
75333+
75334+char *
75335+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
75336+{
75337+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75338+ PAGE_SIZE);
75339+}
75340+
75341+char *
75342+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
75343+{
75344+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
75345+ PAGE_SIZE);
75346+}
75347+
75348+char *
75349+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
75350+{
75351+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
75352+ PAGE_SIZE);
75353+}
75354+
75355+char *
75356+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
75357+{
75358+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
75359+ PAGE_SIZE);
75360+}
75361+
75362+char *
75363+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
75364+{
75365+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
75366+ PAGE_SIZE);
75367+}
75368+
75369+__inline__ __u32
75370+to_gr_audit(const __u32 reqmode)
75371+{
75372+ /* masks off auditable permission flags, then shifts them to create
75373+ auditing flags, and adds the special case of append auditing if
75374+ we're requesting write */
75375+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
75376+}
75377+
75378+struct acl_subject_label *
75379+lookup_subject_map(const struct acl_subject_label *userp)
75380+{
75381+ unsigned int index = shash(userp, subj_map_set.s_size);
75382+ struct subject_map *match;
75383+
75384+ match = subj_map_set.s_hash[index];
75385+
75386+ while (match && match->user != userp)
75387+ match = match->next;
75388+
75389+ if (match != NULL)
75390+ return match->kernel;
75391+ else
75392+ return NULL;
75393+}
75394+
75395+static void
75396+insert_subj_map_entry(struct subject_map *subjmap)
75397+{
75398+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
75399+ struct subject_map **curr;
75400+
75401+ subjmap->prev = NULL;
75402+
75403+ curr = &subj_map_set.s_hash[index];
75404+ if (*curr != NULL)
75405+ (*curr)->prev = subjmap;
75406+
75407+ subjmap->next = *curr;
75408+ *curr = subjmap;
75409+
75410+ return;
75411+}
75412+
75413+static struct acl_role_label *
75414+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
75415+ const gid_t gid)
75416+{
75417+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
75418+ struct acl_role_label *match;
75419+ struct role_allowed_ip *ipp;
75420+ unsigned int x;
75421+ u32 curr_ip = task->signal->curr_ip;
75422+
75423+ task->signal->saved_ip = curr_ip;
75424+
75425+ match = acl_role_set.r_hash[index];
75426+
75427+ while (match) {
75428+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
75429+ for (x = 0; x < match->domain_child_num; x++) {
75430+ if (match->domain_children[x] == uid)
75431+ goto found;
75432+ }
75433+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
75434+ break;
75435+ match = match->next;
75436+ }
75437+found:
75438+ if (match == NULL) {
75439+ try_group:
75440+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
75441+ match = acl_role_set.r_hash[index];
75442+
75443+ while (match) {
75444+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
75445+ for (x = 0; x < match->domain_child_num; x++) {
75446+ if (match->domain_children[x] == gid)
75447+ goto found2;
75448+ }
75449+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
75450+ break;
75451+ match = match->next;
75452+ }
75453+found2:
75454+ if (match == NULL)
75455+ match = default_role;
75456+ if (match->allowed_ips == NULL)
75457+ return match;
75458+ else {
75459+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75460+ if (likely
75461+ ((ntohl(curr_ip) & ipp->netmask) ==
75462+ (ntohl(ipp->addr) & ipp->netmask)))
75463+ return match;
75464+ }
75465+ match = default_role;
75466+ }
75467+ } else if (match->allowed_ips == NULL) {
75468+ return match;
75469+ } else {
75470+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75471+ if (likely
75472+ ((ntohl(curr_ip) & ipp->netmask) ==
75473+ (ntohl(ipp->addr) & ipp->netmask)))
75474+ return match;
75475+ }
75476+ goto try_group;
75477+ }
75478+
75479+ return match;
75480+}
75481+
75482+struct acl_subject_label *
75483+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
75484+ const struct acl_role_label *role)
75485+{
75486+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
75487+ struct acl_subject_label *match;
75488+
75489+ match = role->subj_hash[index];
75490+
75491+ while (match && (match->inode != ino || match->device != dev ||
75492+ (match->mode & GR_DELETED))) {
75493+ match = match->next;
75494+ }
75495+
75496+ if (match && !(match->mode & GR_DELETED))
75497+ return match;
75498+ else
75499+ return NULL;
75500+}
75501+
75502+struct acl_subject_label *
75503+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
75504+ const struct acl_role_label *role)
75505+{
75506+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
75507+ struct acl_subject_label *match;
75508+
75509+ match = role->subj_hash[index];
75510+
75511+ while (match && (match->inode != ino || match->device != dev ||
75512+ !(match->mode & GR_DELETED))) {
75513+ match = match->next;
75514+ }
75515+
75516+ if (match && (match->mode & GR_DELETED))
75517+ return match;
75518+ else
75519+ return NULL;
75520+}
75521+
75522+static struct acl_object_label *
75523+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
75524+ const struct acl_subject_label *subj)
75525+{
75526+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75527+ struct acl_object_label *match;
75528+
75529+ match = subj->obj_hash[index];
75530+
75531+ while (match && (match->inode != ino || match->device != dev ||
75532+ (match->mode & GR_DELETED))) {
75533+ match = match->next;
75534+ }
75535+
75536+ if (match && !(match->mode & GR_DELETED))
75537+ return match;
75538+ else
75539+ return NULL;
75540+}
75541+
75542+static struct acl_object_label *
75543+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
75544+ const struct acl_subject_label *subj)
75545+{
75546+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75547+ struct acl_object_label *match;
75548+
75549+ match = subj->obj_hash[index];
75550+
75551+ while (match && (match->inode != ino || match->device != dev ||
75552+ !(match->mode & GR_DELETED))) {
75553+ match = match->next;
75554+ }
75555+
75556+ if (match && (match->mode & GR_DELETED))
75557+ return match;
75558+
75559+ match = subj->obj_hash[index];
75560+
75561+ while (match && (match->inode != ino || match->device != dev ||
75562+ (match->mode & GR_DELETED))) {
75563+ match = match->next;
75564+ }
75565+
75566+ if (match && !(match->mode & GR_DELETED))
75567+ return match;
75568+ else
75569+ return NULL;
75570+}
75571+
75572+static struct name_entry *
75573+lookup_name_entry(const char *name)
75574+{
75575+ unsigned int len = strlen(name);
75576+ unsigned int key = full_name_hash(name, len);
75577+ unsigned int index = key % name_set.n_size;
75578+ struct name_entry *match;
75579+
75580+ match = name_set.n_hash[index];
75581+
75582+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
75583+ match = match->next;
75584+
75585+ return match;
75586+}
75587+
75588+static struct name_entry *
75589+lookup_name_entry_create(const char *name)
75590+{
75591+ unsigned int len = strlen(name);
75592+ unsigned int key = full_name_hash(name, len);
75593+ unsigned int index = key % name_set.n_size;
75594+ struct name_entry *match;
75595+
75596+ match = name_set.n_hash[index];
75597+
75598+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75599+ !match->deleted))
75600+ match = match->next;
75601+
75602+ if (match && match->deleted)
75603+ return match;
75604+
75605+ match = name_set.n_hash[index];
75606+
75607+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75608+ match->deleted))
75609+ match = match->next;
75610+
75611+ if (match && !match->deleted)
75612+ return match;
75613+ else
75614+ return NULL;
75615+}
75616+
75617+static struct inodev_entry *
75618+lookup_inodev_entry(const ino_t ino, const dev_t dev)
75619+{
75620+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
75621+ struct inodev_entry *match;
75622+
75623+ match = inodev_set.i_hash[index];
75624+
75625+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
75626+ match = match->next;
75627+
75628+ return match;
75629+}
75630+
75631+static void
75632+insert_inodev_entry(struct inodev_entry *entry)
75633+{
75634+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
75635+ inodev_set.i_size);
75636+ struct inodev_entry **curr;
75637+
75638+ entry->prev = NULL;
75639+
75640+ curr = &inodev_set.i_hash[index];
75641+ if (*curr != NULL)
75642+ (*curr)->prev = entry;
75643+
75644+ entry->next = *curr;
75645+ *curr = entry;
75646+
75647+ return;
75648+}
75649+
75650+static void
75651+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75652+{
75653+ unsigned int index =
75654+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
75655+ struct acl_role_label **curr;
75656+ struct acl_role_label *tmp;
75657+
75658+ curr = &acl_role_set.r_hash[index];
75659+
75660+ /* if role was already inserted due to domains and already has
75661+ a role in the same bucket as it attached, then we need to
75662+ combine these two buckets
75663+ */
75664+ if (role->next) {
75665+ tmp = role->next;
75666+ while (tmp->next)
75667+ tmp = tmp->next;
75668+ tmp->next = *curr;
75669+ } else
75670+ role->next = *curr;
75671+ *curr = role;
75672+
75673+ return;
75674+}
75675+
75676+static void
75677+insert_acl_role_label(struct acl_role_label *role)
75678+{
75679+ int i;
75680+
75681+ if (role_list == NULL) {
75682+ role_list = role;
75683+ role->prev = NULL;
75684+ } else {
75685+ role->prev = role_list;
75686+ role_list = role;
75687+ }
75688+
75689+ /* used for hash chains */
75690+ role->next = NULL;
75691+
75692+ if (role->roletype & GR_ROLE_DOMAIN) {
75693+ for (i = 0; i < role->domain_child_num; i++)
75694+ __insert_acl_role_label(role, role->domain_children[i]);
75695+ } else
75696+ __insert_acl_role_label(role, role->uidgid);
75697+}
75698+
75699+static int
75700+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75701+{
75702+ struct name_entry **curr, *nentry;
75703+ struct inodev_entry *ientry;
75704+ unsigned int len = strlen(name);
75705+ unsigned int key = full_name_hash(name, len);
75706+ unsigned int index = key % name_set.n_size;
75707+
75708+ curr = &name_set.n_hash[index];
75709+
75710+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75711+ curr = &((*curr)->next);
75712+
75713+ if (*curr != NULL)
75714+ return 1;
75715+
75716+ nentry = acl_alloc(sizeof (struct name_entry));
75717+ if (nentry == NULL)
75718+ return 0;
75719+ ientry = acl_alloc(sizeof (struct inodev_entry));
75720+ if (ientry == NULL)
75721+ return 0;
75722+ ientry->nentry = nentry;
75723+
75724+ nentry->key = key;
75725+ nentry->name = name;
75726+ nentry->inode = inode;
75727+ nentry->device = device;
75728+ nentry->len = len;
75729+ nentry->deleted = deleted;
75730+
75731+ nentry->prev = NULL;
75732+ curr = &name_set.n_hash[index];
75733+ if (*curr != NULL)
75734+ (*curr)->prev = nentry;
75735+ nentry->next = *curr;
75736+ *curr = nentry;
75737+
75738+ /* insert us into the table searchable by inode/dev */
75739+ insert_inodev_entry(ientry);
75740+
75741+ return 1;
75742+}
75743+
75744+static void
75745+insert_acl_obj_label(struct acl_object_label *obj,
75746+ struct acl_subject_label *subj)
75747+{
75748+ unsigned int index =
75749+ fhash(obj->inode, obj->device, subj->obj_hash_size);
75750+ struct acl_object_label **curr;
75751+
75752+
75753+ obj->prev = NULL;
75754+
75755+ curr = &subj->obj_hash[index];
75756+ if (*curr != NULL)
75757+ (*curr)->prev = obj;
75758+
75759+ obj->next = *curr;
75760+ *curr = obj;
75761+
75762+ return;
75763+}
75764+
75765+static void
75766+insert_acl_subj_label(struct acl_subject_label *obj,
75767+ struct acl_role_label *role)
75768+{
75769+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
75770+ struct acl_subject_label **curr;
75771+
75772+ obj->prev = NULL;
75773+
75774+ curr = &role->subj_hash[index];
75775+ if (*curr != NULL)
75776+ (*curr)->prev = obj;
75777+
75778+ obj->next = *curr;
75779+ *curr = obj;
75780+
75781+ return;
75782+}
75783+
75784+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75785+
75786+static void *
75787+create_table(__u32 * len, int elementsize)
75788+{
75789+ unsigned int table_sizes[] = {
75790+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75791+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75792+ 4194301, 8388593, 16777213, 33554393, 67108859
75793+ };
75794+ void *newtable = NULL;
75795+ unsigned int pwr = 0;
75796+
75797+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75798+ table_sizes[pwr] <= *len)
75799+ pwr++;
75800+
75801+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75802+ return newtable;
75803+
75804+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75805+ newtable =
75806+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75807+ else
75808+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75809+
75810+ *len = table_sizes[pwr];
75811+
75812+ return newtable;
75813+}
75814+
75815+static int
75816+init_variables(const struct gr_arg *arg)
75817+{
75818+ struct task_struct *reaper = &init_task;
75819+ unsigned int stacksize;
75820+
75821+ subj_map_set.s_size = arg->role_db.num_subjects;
75822+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75823+ name_set.n_size = arg->role_db.num_objects;
75824+ inodev_set.i_size = arg->role_db.num_objects;
75825+
75826+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
75827+ !name_set.n_size || !inodev_set.i_size)
75828+ return 1;
75829+
75830+ if (!gr_init_uidset())
75831+ return 1;
75832+
75833+ /* set up the stack that holds allocation info */
75834+
75835+ stacksize = arg->role_db.num_pointers + 5;
75836+
75837+ if (!acl_alloc_stack_init(stacksize))
75838+ return 1;
75839+
75840+ /* grab reference for the real root dentry and vfsmount */
75841+ read_lock(&reaper->fs->lock);
75842+ real_root = dget(reaper->fs->root.dentry);
75843+ real_root_mnt = mntget(reaper->fs->root.mnt);
75844+ read_unlock(&reaper->fs->lock);
75845+
75846+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75847+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
75848+#endif
75849+
75850+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
75851+ if (fakefs_obj_rw == NULL)
75852+ return 1;
75853+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75854+
75855+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
75856+ if (fakefs_obj_rwx == NULL)
75857+ return 1;
75858+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75859+
75860+ subj_map_set.s_hash =
75861+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
75862+ acl_role_set.r_hash =
75863+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
75864+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
75865+ inodev_set.i_hash =
75866+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
75867+
75868+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
75869+ !name_set.n_hash || !inodev_set.i_hash)
75870+ return 1;
75871+
75872+ memset(subj_map_set.s_hash, 0,
75873+ sizeof(struct subject_map *) * subj_map_set.s_size);
75874+ memset(acl_role_set.r_hash, 0,
75875+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
75876+ memset(name_set.n_hash, 0,
75877+ sizeof (struct name_entry *) * name_set.n_size);
75878+ memset(inodev_set.i_hash, 0,
75879+ sizeof (struct inodev_entry *) * inodev_set.i_size);
75880+
75881+ return 0;
75882+}
75883+
75884+/* free information not needed after startup
75885+ currently contains user->kernel pointer mappings for subjects
75886+*/
75887+
75888+static void
75889+free_init_variables(void)
75890+{
75891+ __u32 i;
75892+
75893+ if (subj_map_set.s_hash) {
75894+ for (i = 0; i < subj_map_set.s_size; i++) {
75895+ if (subj_map_set.s_hash[i]) {
75896+ kfree(subj_map_set.s_hash[i]);
75897+ subj_map_set.s_hash[i] = NULL;
75898+ }
75899+ }
75900+
75901+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
75902+ PAGE_SIZE)
75903+ kfree(subj_map_set.s_hash);
75904+ else
75905+ vfree(subj_map_set.s_hash);
75906+ }
75907+
75908+ return;
75909+}
75910+
75911+static void
75912+free_variables(void)
75913+{
75914+ struct acl_subject_label *s;
75915+ struct acl_role_label *r;
75916+ struct task_struct *task, *task2;
75917+ unsigned int x;
75918+
75919+ gr_clear_learn_entries();
75920+
75921+ read_lock(&tasklist_lock);
75922+ do_each_thread(task2, task) {
75923+ task->acl_sp_role = 0;
75924+ task->acl_role_id = 0;
75925+ task->acl = NULL;
75926+ task->role = NULL;
75927+ } while_each_thread(task2, task);
75928+ read_unlock(&tasklist_lock);
75929+
75930+ /* release the reference to the real root dentry and vfsmount */
75931+ if (real_root)
75932+ dput(real_root);
75933+ real_root = NULL;
75934+ if (real_root_mnt)
75935+ mntput(real_root_mnt);
75936+ real_root_mnt = NULL;
75937+
75938+ /* free all object hash tables */
75939+
75940+ FOR_EACH_ROLE_START(r)
75941+ if (r->subj_hash == NULL)
75942+ goto next_role;
75943+ FOR_EACH_SUBJECT_START(r, s, x)
75944+ if (s->obj_hash == NULL)
75945+ break;
75946+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75947+ kfree(s->obj_hash);
75948+ else
75949+ vfree(s->obj_hash);
75950+ FOR_EACH_SUBJECT_END(s, x)
75951+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75952+ if (s->obj_hash == NULL)
75953+ break;
75954+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75955+ kfree(s->obj_hash);
75956+ else
75957+ vfree(s->obj_hash);
75958+ FOR_EACH_NESTED_SUBJECT_END(s)
75959+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75960+ kfree(r->subj_hash);
75961+ else
75962+ vfree(r->subj_hash);
75963+ r->subj_hash = NULL;
75964+next_role:
75965+ FOR_EACH_ROLE_END(r)
75966+
75967+ acl_free_all();
75968+
75969+ if (acl_role_set.r_hash) {
75970+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75971+ PAGE_SIZE)
75972+ kfree(acl_role_set.r_hash);
75973+ else
75974+ vfree(acl_role_set.r_hash);
75975+ }
75976+ if (name_set.n_hash) {
75977+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
75978+ PAGE_SIZE)
75979+ kfree(name_set.n_hash);
75980+ else
75981+ vfree(name_set.n_hash);
75982+ }
75983+
75984+ if (inodev_set.i_hash) {
75985+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75986+ PAGE_SIZE)
75987+ kfree(inodev_set.i_hash);
75988+ else
75989+ vfree(inodev_set.i_hash);
75990+ }
75991+
75992+ gr_free_uidset();
75993+
75994+ memset(&name_set, 0, sizeof (struct name_db));
75995+ memset(&inodev_set, 0, sizeof (struct inodev_db));
75996+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
75997+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
75998+
75999+ default_role = NULL;
76000+ role_list = NULL;
76001+
76002+ return;
76003+}
76004+
76005+static __u32
76006+count_user_objs(struct acl_object_label *userp)
76007+{
76008+ struct acl_object_label o_tmp;
76009+ __u32 num = 0;
76010+
76011+ while (userp) {
76012+ if (copy_from_user(&o_tmp, userp,
76013+ sizeof (struct acl_object_label)))
76014+ break;
76015+
76016+ userp = o_tmp.prev;
76017+ num++;
76018+ }
76019+
76020+ return num;
76021+}
76022+
76023+static struct acl_subject_label *
76024+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
76025+
76026+static int
76027+copy_user_glob(struct acl_object_label *obj)
76028+{
76029+ struct acl_object_label *g_tmp, **guser;
76030+ unsigned int len;
76031+ char *tmp;
76032+
76033+ if (obj->globbed == NULL)
76034+ return 0;
76035+
76036+ guser = &obj->globbed;
76037+ while (*guser) {
76038+ g_tmp = (struct acl_object_label *)
76039+ acl_alloc(sizeof (struct acl_object_label));
76040+ if (g_tmp == NULL)
76041+ return -ENOMEM;
76042+
76043+ if (copy_from_user(g_tmp, *guser,
76044+ sizeof (struct acl_object_label)))
76045+ return -EFAULT;
76046+
76047+ len = strnlen_user(g_tmp->filename, PATH_MAX);
76048+
76049+ if (!len || len >= PATH_MAX)
76050+ return -EINVAL;
76051+
76052+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76053+ return -ENOMEM;
76054+
76055+ if (copy_from_user(tmp, g_tmp->filename, len))
76056+ return -EFAULT;
76057+ tmp[len-1] = '\0';
76058+ g_tmp->filename = tmp;
76059+
76060+ *guser = g_tmp;
76061+ guser = &(g_tmp->next);
76062+ }
76063+
76064+ return 0;
76065+}
76066+
76067+static int
76068+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
76069+ struct acl_role_label *role)
76070+{
76071+ struct acl_object_label *o_tmp;
76072+ unsigned int len;
76073+ int ret;
76074+ char *tmp;
76075+
76076+ while (userp) {
76077+ if ((o_tmp = (struct acl_object_label *)
76078+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
76079+ return -ENOMEM;
76080+
76081+ if (copy_from_user(o_tmp, userp,
76082+ sizeof (struct acl_object_label)))
76083+ return -EFAULT;
76084+
76085+ userp = o_tmp->prev;
76086+
76087+ len = strnlen_user(o_tmp->filename, PATH_MAX);
76088+
76089+ if (!len || len >= PATH_MAX)
76090+ return -EINVAL;
76091+
76092+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76093+ return -ENOMEM;
76094+
76095+ if (copy_from_user(tmp, o_tmp->filename, len))
76096+ return -EFAULT;
76097+ tmp[len-1] = '\0';
76098+ o_tmp->filename = tmp;
76099+
76100+ insert_acl_obj_label(o_tmp, subj);
76101+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
76102+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
76103+ return -ENOMEM;
76104+
76105+ ret = copy_user_glob(o_tmp);
76106+ if (ret)
76107+ return ret;
76108+
76109+ if (o_tmp->nested) {
76110+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
76111+ if (IS_ERR(o_tmp->nested))
76112+ return PTR_ERR(o_tmp->nested);
76113+
76114+ /* insert into nested subject list */
76115+ o_tmp->nested->next = role->hash->first;
76116+ role->hash->first = o_tmp->nested;
76117+ }
76118+ }
76119+
76120+ return 0;
76121+}
76122+
76123+static __u32
76124+count_user_subjs(struct acl_subject_label *userp)
76125+{
76126+ struct acl_subject_label s_tmp;
76127+ __u32 num = 0;
76128+
76129+ while (userp) {
76130+ if (copy_from_user(&s_tmp, userp,
76131+ sizeof (struct acl_subject_label)))
76132+ break;
76133+
76134+ userp = s_tmp.prev;
76135+ /* do not count nested subjects against this count, since
76136+ they are not included in the hash table, but are
76137+ attached to objects. We have already counted
76138+ the subjects in userspace for the allocation
76139+ stack
76140+ */
76141+ if (!(s_tmp.mode & GR_NESTED))
76142+ num++;
76143+ }
76144+
76145+ return num;
76146+}
76147+
76148+static int
76149+copy_user_allowedips(struct acl_role_label *rolep)
76150+{
76151+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
76152+
76153+ ruserip = rolep->allowed_ips;
76154+
76155+ while (ruserip) {
76156+ rlast = rtmp;
76157+
76158+ if ((rtmp = (struct role_allowed_ip *)
76159+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
76160+ return -ENOMEM;
76161+
76162+ if (copy_from_user(rtmp, ruserip,
76163+ sizeof (struct role_allowed_ip)))
76164+ return -EFAULT;
76165+
76166+ ruserip = rtmp->prev;
76167+
76168+ if (!rlast) {
76169+ rtmp->prev = NULL;
76170+ rolep->allowed_ips = rtmp;
76171+ } else {
76172+ rlast->next = rtmp;
76173+ rtmp->prev = rlast;
76174+ }
76175+
76176+ if (!ruserip)
76177+ rtmp->next = NULL;
76178+ }
76179+
76180+ return 0;
76181+}
76182+
76183+static int
76184+copy_user_transitions(struct acl_role_label *rolep)
76185+{
76186+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
76187+
76188+ unsigned int len;
76189+ char *tmp;
76190+
76191+ rusertp = rolep->transitions;
76192+
76193+ while (rusertp) {
76194+ rlast = rtmp;
76195+
76196+ if ((rtmp = (struct role_transition *)
76197+ acl_alloc(sizeof (struct role_transition))) == NULL)
76198+ return -ENOMEM;
76199+
76200+ if (copy_from_user(rtmp, rusertp,
76201+ sizeof (struct role_transition)))
76202+ return -EFAULT;
76203+
76204+ rusertp = rtmp->prev;
76205+
76206+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
76207+
76208+ if (!len || len >= GR_SPROLE_LEN)
76209+ return -EINVAL;
76210+
76211+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76212+ return -ENOMEM;
76213+
76214+ if (copy_from_user(tmp, rtmp->rolename, len))
76215+ return -EFAULT;
76216+ tmp[len-1] = '\0';
76217+ rtmp->rolename = tmp;
76218+
76219+ if (!rlast) {
76220+ rtmp->prev = NULL;
76221+ rolep->transitions = rtmp;
76222+ } else {
76223+ rlast->next = rtmp;
76224+ rtmp->prev = rlast;
76225+ }
76226+
76227+ if (!rusertp)
76228+ rtmp->next = NULL;
76229+ }
76230+
76231+ return 0;
76232+}
76233+
76234+static struct acl_subject_label *
76235+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
76236+{
76237+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
76238+ unsigned int len;
76239+ char *tmp;
76240+ __u32 num_objs;
76241+ struct acl_ip_label **i_tmp, *i_utmp2;
76242+ struct gr_hash_struct ghash;
76243+ struct subject_map *subjmap;
76244+ unsigned int i_num;
76245+ int err;
76246+
76247+ s_tmp = lookup_subject_map(userp);
76248+
76249+ /* we've already copied this subject into the kernel, just return
76250+ the reference to it, and don't copy it over again
76251+ */
76252+ if (s_tmp)
76253+ return(s_tmp);
76254+
76255+ if ((s_tmp = (struct acl_subject_label *)
76256+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
76257+ return ERR_PTR(-ENOMEM);
76258+
76259+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
76260+ if (subjmap == NULL)
76261+ return ERR_PTR(-ENOMEM);
76262+
76263+ subjmap->user = userp;
76264+ subjmap->kernel = s_tmp;
76265+ insert_subj_map_entry(subjmap);
76266+
76267+ if (copy_from_user(s_tmp, userp,
76268+ sizeof (struct acl_subject_label)))
76269+ return ERR_PTR(-EFAULT);
76270+
76271+ len = strnlen_user(s_tmp->filename, PATH_MAX);
76272+
76273+ if (!len || len >= PATH_MAX)
76274+ return ERR_PTR(-EINVAL);
76275+
76276+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76277+ return ERR_PTR(-ENOMEM);
76278+
76279+ if (copy_from_user(tmp, s_tmp->filename, len))
76280+ return ERR_PTR(-EFAULT);
76281+ tmp[len-1] = '\0';
76282+ s_tmp->filename = tmp;
76283+
76284+ if (!strcmp(s_tmp->filename, "/"))
76285+ role->root_label = s_tmp;
76286+
76287+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
76288+ return ERR_PTR(-EFAULT);
76289+
76290+ /* copy user and group transition tables */
76291+
76292+ if (s_tmp->user_trans_num) {
76293+ uid_t *uidlist;
76294+
76295+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76296+ if (uidlist == NULL)
76297+ return ERR_PTR(-ENOMEM);
76298+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76299+ return ERR_PTR(-EFAULT);
76300+
76301+ s_tmp->user_transitions = uidlist;
76302+ }
76303+
76304+ if (s_tmp->group_trans_num) {
76305+ gid_t *gidlist;
76306+
76307+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76308+ if (gidlist == NULL)
76309+ return ERR_PTR(-ENOMEM);
76310+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76311+ return ERR_PTR(-EFAULT);
76312+
76313+ s_tmp->group_transitions = gidlist;
76314+ }
76315+
76316+ /* set up object hash table */
76317+ num_objs = count_user_objs(ghash.first);
76318+
76319+ s_tmp->obj_hash_size = num_objs;
76320+ s_tmp->obj_hash =
76321+ (struct acl_object_label **)
76322+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76323+
76324+ if (!s_tmp->obj_hash)
76325+ return ERR_PTR(-ENOMEM);
76326+
76327+ memset(s_tmp->obj_hash, 0,
76328+ s_tmp->obj_hash_size *
76329+ sizeof (struct acl_object_label *));
76330+
76331+ /* add in objects */
76332+ err = copy_user_objs(ghash.first, s_tmp, role);
76333+
76334+ if (err)
76335+ return ERR_PTR(err);
76336+
76337+ /* set pointer for parent subject */
76338+ if (s_tmp->parent_subject) {
76339+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
76340+
76341+ if (IS_ERR(s_tmp2))
76342+ return s_tmp2;
76343+
76344+ s_tmp->parent_subject = s_tmp2;
76345+ }
76346+
76347+ /* add in ip acls */
76348+
76349+ if (!s_tmp->ip_num) {
76350+ s_tmp->ips = NULL;
76351+ goto insert;
76352+ }
76353+
76354+ i_tmp =
76355+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76356+ sizeof (struct acl_ip_label *));
76357+
76358+ if (!i_tmp)
76359+ return ERR_PTR(-ENOMEM);
76360+
76361+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76362+ *(i_tmp + i_num) =
76363+ (struct acl_ip_label *)
76364+ acl_alloc(sizeof (struct acl_ip_label));
76365+ if (!*(i_tmp + i_num))
76366+ return ERR_PTR(-ENOMEM);
76367+
76368+ if (copy_from_user
76369+ (&i_utmp2, s_tmp->ips + i_num,
76370+ sizeof (struct acl_ip_label *)))
76371+ return ERR_PTR(-EFAULT);
76372+
76373+ if (copy_from_user
76374+ (*(i_tmp + i_num), i_utmp2,
76375+ sizeof (struct acl_ip_label)))
76376+ return ERR_PTR(-EFAULT);
76377+
76378+ if ((*(i_tmp + i_num))->iface == NULL)
76379+ continue;
76380+
76381+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
76382+ if (!len || len >= IFNAMSIZ)
76383+ return ERR_PTR(-EINVAL);
76384+ tmp = acl_alloc(len);
76385+ if (tmp == NULL)
76386+ return ERR_PTR(-ENOMEM);
76387+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
76388+ return ERR_PTR(-EFAULT);
76389+ (*(i_tmp + i_num))->iface = tmp;
76390+ }
76391+
76392+ s_tmp->ips = i_tmp;
76393+
76394+insert:
76395+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76396+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76397+ return ERR_PTR(-ENOMEM);
76398+
76399+ return s_tmp;
76400+}
76401+
76402+static int
76403+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76404+{
76405+ struct acl_subject_label s_pre;
76406+ struct acl_subject_label * ret;
76407+ int err;
76408+
76409+ while (userp) {
76410+ if (copy_from_user(&s_pre, userp,
76411+ sizeof (struct acl_subject_label)))
76412+ return -EFAULT;
76413+
76414+ /* do not add nested subjects here, add
76415+ while parsing objects
76416+ */
76417+
76418+ if (s_pre.mode & GR_NESTED) {
76419+ userp = s_pre.prev;
76420+ continue;
76421+ }
76422+
76423+ ret = do_copy_user_subj(userp, role);
76424+
76425+ err = PTR_ERR(ret);
76426+ if (IS_ERR(ret))
76427+ return err;
76428+
76429+ insert_acl_subj_label(ret, role);
76430+
76431+ userp = s_pre.prev;
76432+ }
76433+
76434+ return 0;
76435+}
76436+
76437+static int
76438+copy_user_acl(struct gr_arg *arg)
76439+{
76440+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76441+ struct sprole_pw *sptmp;
76442+ struct gr_hash_struct *ghash;
76443+ uid_t *domainlist;
76444+ unsigned int r_num;
76445+ unsigned int len;
76446+ char *tmp;
76447+ int err = 0;
76448+ __u16 i;
76449+ __u32 num_subjs;
76450+
76451+ /* we need a default and kernel role */
76452+ if (arg->role_db.num_roles < 2)
76453+ return -EINVAL;
76454+
76455+ /* copy special role authentication info from userspace */
76456+
76457+ num_sprole_pws = arg->num_sprole_pws;
76458+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
76459+
76460+ if (!acl_special_roles) {
76461+ err = -ENOMEM;
76462+ goto cleanup;
76463+ }
76464+
76465+ for (i = 0; i < num_sprole_pws; i++) {
76466+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76467+ if (!sptmp) {
76468+ err = -ENOMEM;
76469+ goto cleanup;
76470+ }
76471+ if (copy_from_user(sptmp, arg->sprole_pws + i,
76472+ sizeof (struct sprole_pw))) {
76473+ err = -EFAULT;
76474+ goto cleanup;
76475+ }
76476+
76477+ len =
76478+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
76479+
76480+ if (!len || len >= GR_SPROLE_LEN) {
76481+ err = -EINVAL;
76482+ goto cleanup;
76483+ }
76484+
76485+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
76486+ err = -ENOMEM;
76487+ goto cleanup;
76488+ }
76489+
76490+ if (copy_from_user(tmp, sptmp->rolename, len)) {
76491+ err = -EFAULT;
76492+ goto cleanup;
76493+ }
76494+ tmp[len-1] = '\0';
76495+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76496+ printk(KERN_ALERT "Copying special role %s\n", tmp);
76497+#endif
76498+ sptmp->rolename = tmp;
76499+ acl_special_roles[i] = sptmp;
76500+ }
76501+
76502+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76503+
76504+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76505+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76506+
76507+ if (!r_tmp) {
76508+ err = -ENOMEM;
76509+ goto cleanup;
76510+ }
76511+
76512+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
76513+ sizeof (struct acl_role_label *))) {
76514+ err = -EFAULT;
76515+ goto cleanup;
76516+ }
76517+
76518+ if (copy_from_user(r_tmp, r_utmp2,
76519+ sizeof (struct acl_role_label))) {
76520+ err = -EFAULT;
76521+ goto cleanup;
76522+ }
76523+
76524+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
76525+
76526+ if (!len || len >= PATH_MAX) {
76527+ err = -EINVAL;
76528+ goto cleanup;
76529+ }
76530+
76531+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
76532+ err = -ENOMEM;
76533+ goto cleanup;
76534+ }
76535+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
76536+ err = -EFAULT;
76537+ goto cleanup;
76538+ }
76539+ tmp[len-1] = '\0';
76540+ r_tmp->rolename = tmp;
76541+
76542+ if (!strcmp(r_tmp->rolename, "default")
76543+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76544+ default_role = r_tmp;
76545+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76546+ kernel_role = r_tmp;
76547+ }
76548+
76549+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
76550+ err = -ENOMEM;
76551+ goto cleanup;
76552+ }
76553+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
76554+ err = -EFAULT;
76555+ goto cleanup;
76556+ }
76557+
76558+ r_tmp->hash = ghash;
76559+
76560+ num_subjs = count_user_subjs(r_tmp->hash->first);
76561+
76562+ r_tmp->subj_hash_size = num_subjs;
76563+ r_tmp->subj_hash =
76564+ (struct acl_subject_label **)
76565+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76566+
76567+ if (!r_tmp->subj_hash) {
76568+ err = -ENOMEM;
76569+ goto cleanup;
76570+ }
76571+
76572+ err = copy_user_allowedips(r_tmp);
76573+ if (err)
76574+ goto cleanup;
76575+
76576+ /* copy domain info */
76577+ if (r_tmp->domain_children != NULL) {
76578+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76579+ if (domainlist == NULL) {
76580+ err = -ENOMEM;
76581+ goto cleanup;
76582+ }
76583+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
76584+ err = -EFAULT;
76585+ goto cleanup;
76586+ }
76587+ r_tmp->domain_children = domainlist;
76588+ }
76589+
76590+ err = copy_user_transitions(r_tmp);
76591+ if (err)
76592+ goto cleanup;
76593+
76594+ memset(r_tmp->subj_hash, 0,
76595+ r_tmp->subj_hash_size *
76596+ sizeof (struct acl_subject_label *));
76597+
76598+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
76599+
76600+ if (err)
76601+ goto cleanup;
76602+
76603+ /* set nested subject list to null */
76604+ r_tmp->hash->first = NULL;
76605+
76606+ insert_acl_role_label(r_tmp);
76607+ }
76608+
76609+ goto return_err;
76610+ cleanup:
76611+ free_variables();
76612+ return_err:
76613+ return err;
76614+
76615+}
76616+
76617+static int
76618+gracl_init(struct gr_arg *args)
76619+{
76620+ int error = 0;
76621+
76622+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76623+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76624+
76625+ if (init_variables(args)) {
76626+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76627+ error = -ENOMEM;
76628+ free_variables();
76629+ goto out;
76630+ }
76631+
76632+ error = copy_user_acl(args);
76633+ free_init_variables();
76634+ if (error) {
76635+ free_variables();
76636+ goto out;
76637+ }
76638+
76639+ if ((error = gr_set_acls(0))) {
76640+ free_variables();
76641+ goto out;
76642+ }
76643+
76644+ pax_open_kernel();
76645+ gr_status |= GR_READY;
76646+ pax_close_kernel();
76647+
76648+ out:
76649+ return error;
76650+}
76651+
76652+/* derived from glibc fnmatch() 0: match, 1: no match*/
76653+
76654+static int
76655+glob_match(const char *p, const char *n)
76656+{
76657+ char c;
76658+
76659+ while ((c = *p++) != '\0') {
76660+ switch (c) {
76661+ case '?':
76662+ if (*n == '\0')
76663+ return 1;
76664+ else if (*n == '/')
76665+ return 1;
76666+ break;
76667+ case '\\':
76668+ if (*n != c)
76669+ return 1;
76670+ break;
76671+ case '*':
76672+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
76673+ if (*n == '/')
76674+ return 1;
76675+ else if (c == '?') {
76676+ if (*n == '\0')
76677+ return 1;
76678+ else
76679+ ++n;
76680+ }
76681+ }
76682+ if (c == '\0') {
76683+ return 0;
76684+ } else {
76685+ const char *endp;
76686+
76687+ if ((endp = strchr(n, '/')) == NULL)
76688+ endp = n + strlen(n);
76689+
76690+ if (c == '[') {
76691+ for (--p; n < endp; ++n)
76692+ if (!glob_match(p, n))
76693+ return 0;
76694+ } else if (c == '/') {
76695+ while (*n != '\0' && *n != '/')
76696+ ++n;
76697+ if (*n == '/' && !glob_match(p, n + 1))
76698+ return 0;
76699+ } else {
76700+ for (--p; n < endp; ++n)
76701+ if (*n == c && !glob_match(p, n))
76702+ return 0;
76703+ }
76704+
76705+ return 1;
76706+ }
76707+ case '[':
76708+ {
76709+ int not;
76710+ char cold;
76711+
76712+ if (*n == '\0' || *n == '/')
76713+ return 1;
76714+
76715+ not = (*p == '!' || *p == '^');
76716+ if (not)
76717+ ++p;
76718+
76719+ c = *p++;
76720+ for (;;) {
76721+ unsigned char fn = (unsigned char)*n;
76722+
76723+ if (c == '\0')
76724+ return 1;
76725+ else {
76726+ if (c == fn)
76727+ goto matched;
76728+ cold = c;
76729+ c = *p++;
76730+
76731+ if (c == '-' && *p != ']') {
76732+ unsigned char cend = *p++;
76733+
76734+ if (cend == '\0')
76735+ return 1;
76736+
76737+ if (cold <= fn && fn <= cend)
76738+ goto matched;
76739+
76740+ c = *p++;
76741+ }
76742+ }
76743+
76744+ if (c == ']')
76745+ break;
76746+ }
76747+ if (!not)
76748+ return 1;
76749+ break;
76750+ matched:
76751+ while (c != ']') {
76752+ if (c == '\0')
76753+ return 1;
76754+
76755+ c = *p++;
76756+ }
76757+ if (not)
76758+ return 1;
76759+ }
76760+ break;
76761+ default:
76762+ if (c != *n)
76763+ return 1;
76764+ }
76765+
76766+ ++n;
76767+ }
76768+
76769+ if (*n == '\0')
76770+ return 0;
76771+
76772+ if (*n == '/')
76773+ return 0;
76774+
76775+ return 1;
76776+}
76777+
76778+static struct acl_object_label *
76779+chk_glob_label(struct acl_object_label *globbed,
76780+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
76781+{
76782+ struct acl_object_label *tmp;
76783+
76784+ if (*path == NULL)
76785+ *path = gr_to_filename_nolock(dentry, mnt);
76786+
76787+ tmp = globbed;
76788+
76789+ while (tmp) {
76790+ if (!glob_match(tmp->filename, *path))
76791+ return tmp;
76792+ tmp = tmp->next;
76793+ }
76794+
76795+ return NULL;
76796+}
76797+
76798+static struct acl_object_label *
76799+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76800+ const ino_t curr_ino, const dev_t curr_dev,
76801+ const struct acl_subject_label *subj, char **path, const int checkglob)
76802+{
76803+ struct acl_subject_label *tmpsubj;
76804+ struct acl_object_label *retval;
76805+ struct acl_object_label *retval2;
76806+
76807+ tmpsubj = (struct acl_subject_label *) subj;
76808+ read_lock(&gr_inode_lock);
76809+ do {
76810+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
76811+ if (retval) {
76812+ if (checkglob && retval->globbed) {
76813+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
76814+ if (retval2)
76815+ retval = retval2;
76816+ }
76817+ break;
76818+ }
76819+ } while ((tmpsubj = tmpsubj->parent_subject));
76820+ read_unlock(&gr_inode_lock);
76821+
76822+ return retval;
76823+}
76824+
76825+static __inline__ struct acl_object_label *
76826+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76827+ const struct dentry *curr_dentry,
76828+ const struct acl_subject_label *subj, char **path, const int checkglob)
76829+{
76830+ int newglob = checkglob;
76831+
76832+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
76833+ as we don't want a / * rule to match instead of the / object
76834+ don't do this for create lookups that call this function though, since they're looking up
76835+ on the parent and thus need globbing checks on all paths
76836+ */
76837+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
76838+ newglob = GR_NO_GLOB;
76839+
76840+ return __full_lookup(orig_dentry, orig_mnt,
76841+ curr_dentry->d_inode->i_ino,
76842+ __get_dev(curr_dentry), subj, path, newglob);
76843+}
76844+
76845+static struct acl_object_label *
76846+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76847+ const struct acl_subject_label *subj, char *path, const int checkglob)
76848+{
76849+ struct dentry *dentry = (struct dentry *) l_dentry;
76850+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76851+ struct acl_object_label *retval;
76852+
76853+ spin_lock(&dcache_lock);
76854+ spin_lock(&vfsmount_lock);
76855+
76856+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
76857+#ifdef CONFIG_NET
76858+ mnt == sock_mnt ||
76859+#endif
76860+#ifdef CONFIG_HUGETLBFS
76861+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
76862+#endif
76863+ /* ignore Eric Biederman */
76864+ IS_PRIVATE(l_dentry->d_inode))) {
76865+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
76866+ goto out;
76867+ }
76868+
76869+ for (;;) {
76870+ if (dentry == real_root && mnt == real_root_mnt)
76871+ break;
76872+
76873+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76874+ if (mnt->mnt_parent == mnt)
76875+ break;
76876+
76877+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76878+ if (retval != NULL)
76879+ goto out;
76880+
76881+ dentry = mnt->mnt_mountpoint;
76882+ mnt = mnt->mnt_parent;
76883+ continue;
76884+ }
76885+
76886+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76887+ if (retval != NULL)
76888+ goto out;
76889+
76890+ dentry = dentry->d_parent;
76891+ }
76892+
76893+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76894+
76895+ if (retval == NULL)
76896+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
76897+out:
76898+ spin_unlock(&vfsmount_lock);
76899+ spin_unlock(&dcache_lock);
76900+
76901+ BUG_ON(retval == NULL);
76902+
76903+ return retval;
76904+}
76905+
76906+static __inline__ struct acl_object_label *
76907+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76908+ const struct acl_subject_label *subj)
76909+{
76910+ char *path = NULL;
76911+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
76912+}
76913+
76914+static __inline__ struct acl_object_label *
76915+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76916+ const struct acl_subject_label *subj)
76917+{
76918+ char *path = NULL;
76919+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
76920+}
76921+
76922+static __inline__ struct acl_object_label *
76923+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76924+ const struct acl_subject_label *subj, char *path)
76925+{
76926+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
76927+}
76928+
76929+static struct acl_subject_label *
76930+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76931+ const struct acl_role_label *role)
76932+{
76933+ struct dentry *dentry = (struct dentry *) l_dentry;
76934+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76935+ struct acl_subject_label *retval;
76936+
76937+ spin_lock(&dcache_lock);
76938+ spin_lock(&vfsmount_lock);
76939+
76940+ for (;;) {
76941+ if (dentry == real_root && mnt == real_root_mnt)
76942+ break;
76943+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76944+ if (mnt->mnt_parent == mnt)
76945+ break;
76946+
76947+ read_lock(&gr_inode_lock);
76948+ retval =
76949+ lookup_acl_subj_label(dentry->d_inode->i_ino,
76950+ __get_dev(dentry), role);
76951+ read_unlock(&gr_inode_lock);
76952+ if (retval != NULL)
76953+ goto out;
76954+
76955+ dentry = mnt->mnt_mountpoint;
76956+ mnt = mnt->mnt_parent;
76957+ continue;
76958+ }
76959+
76960+ read_lock(&gr_inode_lock);
76961+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76962+ __get_dev(dentry), role);
76963+ read_unlock(&gr_inode_lock);
76964+ if (retval != NULL)
76965+ goto out;
76966+
76967+ dentry = dentry->d_parent;
76968+ }
76969+
76970+ read_lock(&gr_inode_lock);
76971+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76972+ __get_dev(dentry), role);
76973+ read_unlock(&gr_inode_lock);
76974+
76975+ if (unlikely(retval == NULL)) {
76976+ read_lock(&gr_inode_lock);
76977+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
76978+ __get_dev(real_root), role);
76979+ read_unlock(&gr_inode_lock);
76980+ }
76981+out:
76982+ spin_unlock(&vfsmount_lock);
76983+ spin_unlock(&dcache_lock);
76984+
76985+ BUG_ON(retval == NULL);
76986+
76987+ return retval;
76988+}
76989+
76990+static void
76991+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
76992+{
76993+ struct task_struct *task = current;
76994+ const struct cred *cred = current_cred();
76995+
76996+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76997+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76998+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76999+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
77000+
77001+ return;
77002+}
77003+
77004+static void
77005+gr_log_learn_sysctl(const char *path, const __u32 mode)
77006+{
77007+ struct task_struct *task = current;
77008+ const struct cred *cred = current_cred();
77009+
77010+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
77011+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
77012+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
77013+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
77014+
77015+ return;
77016+}
77017+
77018+static void
77019+gr_log_learn_id_change(const char type, const unsigned int real,
77020+ const unsigned int effective, const unsigned int fs)
77021+{
77022+ struct task_struct *task = current;
77023+ const struct cred *cred = current_cred();
77024+
77025+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
77026+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
77027+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
77028+ type, real, effective, fs, &task->signal->saved_ip);
77029+
77030+ return;
77031+}
77032+
77033+__u32
77034+gr_search_file(const struct dentry * dentry, const __u32 mode,
77035+ const struct vfsmount * mnt)
77036+{
77037+ __u32 retval = mode;
77038+ struct acl_subject_label *curracl;
77039+ struct acl_object_label *currobj;
77040+
77041+ if (unlikely(!(gr_status & GR_READY)))
77042+ return (mode & ~GR_AUDITS);
77043+
77044+ curracl = current->acl;
77045+
77046+ currobj = chk_obj_label(dentry, mnt, curracl);
77047+ retval = currobj->mode & mode;
77048+
77049+ /* if we're opening a specified transfer file for writing
77050+ (e.g. /dev/initctl), then transfer our role to init
77051+ */
77052+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
77053+ current->role->roletype & GR_ROLE_PERSIST)) {
77054+ struct task_struct *task = init_pid_ns.child_reaper;
77055+
77056+ if (task->role != current->role) {
77057+ task->acl_sp_role = 0;
77058+ task->acl_role_id = current->acl_role_id;
77059+ task->role = current->role;
77060+ rcu_read_lock();
77061+ read_lock(&grsec_exec_file_lock);
77062+ gr_apply_subject_to_task(task);
77063+ read_unlock(&grsec_exec_file_lock);
77064+ rcu_read_unlock();
77065+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
77066+ }
77067+ }
77068+
77069+ if (unlikely
77070+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
77071+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
77072+ __u32 new_mode = mode;
77073+
77074+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
77075+
77076+ retval = new_mode;
77077+
77078+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
77079+ new_mode |= GR_INHERIT;
77080+
77081+ if (!(mode & GR_NOLEARN))
77082+ gr_log_learn(dentry, mnt, new_mode);
77083+ }
77084+
77085+ return retval;
77086+}
77087+
77088+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
77089+ const struct dentry *parent,
77090+ const struct vfsmount *mnt)
77091+{
77092+ struct name_entry *match;
77093+ struct acl_object_label *matchpo;
77094+ struct acl_subject_label *curracl;
77095+ char *path;
77096+
77097+ if (unlikely(!(gr_status & GR_READY)))
77098+ return NULL;
77099+
77100+ preempt_disable();
77101+ path = gr_to_filename_rbac(new_dentry, mnt);
77102+ match = lookup_name_entry_create(path);
77103+
77104+ curracl = current->acl;
77105+
77106+ if (match) {
77107+ read_lock(&gr_inode_lock);
77108+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
77109+ read_unlock(&gr_inode_lock);
77110+
77111+ if (matchpo) {
77112+ preempt_enable();
77113+ return matchpo;
77114+ }
77115+ }
77116+
77117+ // lookup parent
77118+
77119+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
77120+
77121+ preempt_enable();
77122+ return matchpo;
77123+}
77124+
77125+__u32
77126+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
77127+ const struct vfsmount * mnt, const __u32 mode)
77128+{
77129+ struct acl_object_label *matchpo;
77130+ __u32 retval;
77131+
77132+ if (unlikely(!(gr_status & GR_READY)))
77133+ return (mode & ~GR_AUDITS);
77134+
77135+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
77136+
77137+ retval = matchpo->mode & mode;
77138+
77139+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
77140+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77141+ __u32 new_mode = mode;
77142+
77143+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
77144+
77145+ gr_log_learn(new_dentry, mnt, new_mode);
77146+ return new_mode;
77147+ }
77148+
77149+ return retval;
77150+}
77151+
77152+__u32
77153+gr_check_link(const struct dentry * new_dentry,
77154+ const struct dentry * parent_dentry,
77155+ const struct vfsmount * parent_mnt,
77156+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
77157+{
77158+ struct acl_object_label *obj;
77159+ __u32 oldmode, newmode;
77160+ __u32 needmode;
77161+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
77162+ GR_DELETE | GR_INHERIT;
77163+
77164+ if (unlikely(!(gr_status & GR_READY)))
77165+ return (GR_CREATE | GR_LINK);
77166+
77167+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
77168+ oldmode = obj->mode;
77169+
77170+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
77171+ newmode = obj->mode;
77172+
77173+ needmode = newmode & checkmodes;
77174+
77175+ // old name for hardlink must have at least the permissions of the new name
77176+ if ((oldmode & needmode) != needmode)
77177+ goto bad;
77178+
77179+ // if old name had restrictions/auditing, make sure the new name does as well
77180+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
77181+
77182+ // don't allow hardlinking of suid/sgid files without permission
77183+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77184+ needmode |= GR_SETID;
77185+
77186+ if ((newmode & needmode) != needmode)
77187+ goto bad;
77188+
77189+ // enforce minimum permissions
77190+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
77191+ return newmode;
77192+bad:
77193+ needmode = oldmode;
77194+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77195+ needmode |= GR_SETID;
77196+
77197+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
77198+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
77199+ return (GR_CREATE | GR_LINK);
77200+ } else if (newmode & GR_SUPPRESS)
77201+ return GR_SUPPRESS;
77202+ else
77203+ return 0;
77204+}
77205+
77206+int
77207+gr_check_hidden_task(const struct task_struct *task)
77208+{
77209+ if (unlikely(!(gr_status & GR_READY)))
77210+ return 0;
77211+
77212+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
77213+ return 1;
77214+
77215+ return 0;
77216+}
77217+
77218+int
77219+gr_check_protected_task(const struct task_struct *task)
77220+{
77221+ if (unlikely(!(gr_status & GR_READY) || !task))
77222+ return 0;
77223+
77224+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77225+ task->acl != current->acl)
77226+ return 1;
77227+
77228+ return 0;
77229+}
77230+
77231+int
77232+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77233+{
77234+ struct task_struct *p;
77235+ int ret = 0;
77236+
77237+ if (unlikely(!(gr_status & GR_READY) || !pid))
77238+ return ret;
77239+
77240+ read_lock(&tasklist_lock);
77241+ do_each_pid_task(pid, type, p) {
77242+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77243+ p->acl != current->acl) {
77244+ ret = 1;
77245+ goto out;
77246+ }
77247+ } while_each_pid_task(pid, type, p);
77248+out:
77249+ read_unlock(&tasklist_lock);
77250+
77251+ return ret;
77252+}
77253+
77254+void
77255+gr_copy_label(struct task_struct *tsk)
77256+{
77257+ /* plain copying of fields is already done by dup_task_struct */
77258+ tsk->signal->used_accept = 0;
77259+ tsk->acl_sp_role = 0;
77260+ //tsk->acl_role_id = current->acl_role_id;
77261+ //tsk->acl = current->acl;
77262+ //tsk->role = current->role;
77263+ tsk->signal->curr_ip = current->signal->curr_ip;
77264+ tsk->signal->saved_ip = current->signal->saved_ip;
77265+ if (current->exec_file)
77266+ get_file(current->exec_file);
77267+ //tsk->exec_file = current->exec_file;
77268+ //tsk->is_writable = current->is_writable;
77269+ if (unlikely(current->signal->used_accept)) {
77270+ current->signal->curr_ip = 0;
77271+ current->signal->saved_ip = 0;
77272+ }
77273+
77274+ return;
77275+}
77276+
77277+static void
77278+gr_set_proc_res(struct task_struct *task)
77279+{
77280+ struct acl_subject_label *proc;
77281+ unsigned short i;
77282+
77283+ proc = task->acl;
77284+
77285+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
77286+ return;
77287+
77288+ for (i = 0; i < RLIM_NLIMITS; i++) {
77289+ if (!(proc->resmask & (1 << i)))
77290+ continue;
77291+
77292+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
77293+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
77294+ }
77295+
77296+ return;
77297+}
77298+
77299+extern int __gr_process_user_ban(struct user_struct *user);
77300+
77301+int
77302+gr_check_user_change(int real, int effective, int fs)
77303+{
77304+ unsigned int i;
77305+ __u16 num;
77306+ uid_t *uidlist;
77307+ int curuid;
77308+ int realok = 0;
77309+ int effectiveok = 0;
77310+ int fsok = 0;
77311+
77312+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
77313+ struct user_struct *user;
77314+
77315+ if (real == -1)
77316+ goto skipit;
77317+
77318+ user = find_user(real);
77319+ if (user == NULL)
77320+ goto skipit;
77321+
77322+ if (__gr_process_user_ban(user)) {
77323+ /* for find_user */
77324+ free_uid(user);
77325+ return 1;
77326+ }
77327+
77328+ /* for find_user */
77329+ free_uid(user);
77330+
77331+skipit:
77332+#endif
77333+
77334+ if (unlikely(!(gr_status & GR_READY)))
77335+ return 0;
77336+
77337+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77338+ gr_log_learn_id_change('u', real, effective, fs);
77339+
77340+ num = current->acl->user_trans_num;
77341+ uidlist = current->acl->user_transitions;
77342+
77343+ if (uidlist == NULL)
77344+ return 0;
77345+
77346+ if (real == -1)
77347+ realok = 1;
77348+ if (effective == -1)
77349+ effectiveok = 1;
77350+ if (fs == -1)
77351+ fsok = 1;
77352+
77353+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
77354+ for (i = 0; i < num; i++) {
77355+ curuid = (int)uidlist[i];
77356+ if (real == curuid)
77357+ realok = 1;
77358+ if (effective == curuid)
77359+ effectiveok = 1;
77360+ if (fs == curuid)
77361+ fsok = 1;
77362+ }
77363+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
77364+ for (i = 0; i < num; i++) {
77365+ curuid = (int)uidlist[i];
77366+ if (real == curuid)
77367+ break;
77368+ if (effective == curuid)
77369+ break;
77370+ if (fs == curuid)
77371+ break;
77372+ }
77373+ /* not in deny list */
77374+ if (i == num) {
77375+ realok = 1;
77376+ effectiveok = 1;
77377+ fsok = 1;
77378+ }
77379+ }
77380+
77381+ if (realok && effectiveok && fsok)
77382+ return 0;
77383+ else {
77384+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77385+ return 1;
77386+ }
77387+}
77388+
77389+int
77390+gr_check_group_change(int real, int effective, int fs)
77391+{
77392+ unsigned int i;
77393+ __u16 num;
77394+ gid_t *gidlist;
77395+ int curgid;
77396+ int realok = 0;
77397+ int effectiveok = 0;
77398+ int fsok = 0;
77399+
77400+ if (unlikely(!(gr_status & GR_READY)))
77401+ return 0;
77402+
77403+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77404+ gr_log_learn_id_change('g', real, effective, fs);
77405+
77406+ num = current->acl->group_trans_num;
77407+ gidlist = current->acl->group_transitions;
77408+
77409+ if (gidlist == NULL)
77410+ return 0;
77411+
77412+ if (real == -1)
77413+ realok = 1;
77414+ if (effective == -1)
77415+ effectiveok = 1;
77416+ if (fs == -1)
77417+ fsok = 1;
77418+
77419+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
77420+ for (i = 0; i < num; i++) {
77421+ curgid = (int)gidlist[i];
77422+ if (real == curgid)
77423+ realok = 1;
77424+ if (effective == curgid)
77425+ effectiveok = 1;
77426+ if (fs == curgid)
77427+ fsok = 1;
77428+ }
77429+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
77430+ for (i = 0; i < num; i++) {
77431+ curgid = (int)gidlist[i];
77432+ if (real == curgid)
77433+ break;
77434+ if (effective == curgid)
77435+ break;
77436+ if (fs == curgid)
77437+ break;
77438+ }
77439+ /* not in deny list */
77440+ if (i == num) {
77441+ realok = 1;
77442+ effectiveok = 1;
77443+ fsok = 1;
77444+ }
77445+ }
77446+
77447+ if (realok && effectiveok && fsok)
77448+ return 0;
77449+ else {
77450+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77451+ return 1;
77452+ }
77453+}
77454+
77455+extern int gr_acl_is_capable(const int cap);
77456+
77457+void
77458+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
77459+{
77460+ struct acl_role_label *role = task->role;
77461+ struct acl_subject_label *subj = NULL;
77462+ struct acl_object_label *obj;
77463+ struct file *filp;
77464+
77465+ if (unlikely(!(gr_status & GR_READY)))
77466+ return;
77467+
77468+ filp = task->exec_file;
77469+
77470+ /* kernel process, we'll give them the kernel role */
77471+ if (unlikely(!filp)) {
77472+ task->role = kernel_role;
77473+ task->acl = kernel_role->root_label;
77474+ return;
77475+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
77476+ role = lookup_acl_role_label(task, uid, gid);
77477+
77478+ /* don't change the role if we're not a privileged process */
77479+ if (role && task->role != role &&
77480+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
77481+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
77482+ return;
77483+
77484+ /* perform subject lookup in possibly new role
77485+ we can use this result below in the case where role == task->role
77486+ */
77487+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
77488+
77489+ /* if we changed uid/gid, but result in the same role
77490+ and are using inheritance, don't lose the inherited subject
77491+ if current subject is other than what normal lookup
77492+ would result in, we arrived via inheritance, don't
77493+ lose subject
77494+ */
77495+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
77496+ (subj == task->acl)))
77497+ task->acl = subj;
77498+
77499+ task->role = role;
77500+
77501+ task->is_writable = 0;
77502+
77503+ /* ignore additional mmap checks for processes that are writable
77504+ by the default ACL */
77505+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77506+ if (unlikely(obj->mode & GR_WRITE))
77507+ task->is_writable = 1;
77508+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
77509+ if (unlikely(obj->mode & GR_WRITE))
77510+ task->is_writable = 1;
77511+
77512+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77513+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77514+#endif
77515+
77516+ gr_set_proc_res(task);
77517+
77518+ return;
77519+}
77520+
77521+int
77522+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77523+ const int unsafe_flags)
77524+{
77525+ struct task_struct *task = current;
77526+ struct acl_subject_label *newacl;
77527+ struct acl_object_label *obj;
77528+ __u32 retmode;
77529+
77530+ if (unlikely(!(gr_status & GR_READY)))
77531+ return 0;
77532+
77533+ newacl = chk_subj_label(dentry, mnt, task->role);
77534+
77535+ task_lock(task);
77536+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
77537+ !(task->role->roletype & GR_ROLE_GOD) &&
77538+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
77539+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77540+ task_unlock(task);
77541+ if (unsafe_flags & LSM_UNSAFE_SHARE)
77542+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
77543+ else
77544+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
77545+ return -EACCES;
77546+ }
77547+ task_unlock(task);
77548+
77549+ obj = chk_obj_label(dentry, mnt, task->acl);
77550+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
77551+
77552+ if (!(task->acl->mode & GR_INHERITLEARN) &&
77553+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
77554+ if (obj->nested)
77555+ task->acl = obj->nested;
77556+ else
77557+ task->acl = newacl;
77558+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
77559+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
77560+
77561+ task->is_writable = 0;
77562+
77563+ /* ignore additional mmap checks for processes that are writable
77564+ by the default ACL */
77565+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
77566+ if (unlikely(obj->mode & GR_WRITE))
77567+ task->is_writable = 1;
77568+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
77569+ if (unlikely(obj->mode & GR_WRITE))
77570+ task->is_writable = 1;
77571+
77572+ gr_set_proc_res(task);
77573+
77574+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77575+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77576+#endif
77577+ return 0;
77578+}
77579+
77580+/* always called with valid inodev ptr */
77581+static void
77582+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
77583+{
77584+ struct acl_object_label *matchpo;
77585+ struct acl_subject_label *matchps;
77586+ struct acl_subject_label *subj;
77587+ struct acl_role_label *role;
77588+ unsigned int x;
77589+
77590+ FOR_EACH_ROLE_START(role)
77591+ FOR_EACH_SUBJECT_START(role, subj, x)
77592+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
77593+ matchpo->mode |= GR_DELETED;
77594+ FOR_EACH_SUBJECT_END(subj,x)
77595+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77596+ if (subj->inode == ino && subj->device == dev)
77597+ subj->mode |= GR_DELETED;
77598+ FOR_EACH_NESTED_SUBJECT_END(subj)
77599+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
77600+ matchps->mode |= GR_DELETED;
77601+ FOR_EACH_ROLE_END(role)
77602+
77603+ inodev->nentry->deleted = 1;
77604+
77605+ return;
77606+}
77607+
77608+void
77609+gr_handle_delete(const ino_t ino, const dev_t dev)
77610+{
77611+ struct inodev_entry *inodev;
77612+
77613+ if (unlikely(!(gr_status & GR_READY)))
77614+ return;
77615+
77616+ write_lock(&gr_inode_lock);
77617+ inodev = lookup_inodev_entry(ino, dev);
77618+ if (inodev != NULL)
77619+ do_handle_delete(inodev, ino, dev);
77620+ write_unlock(&gr_inode_lock);
77621+
77622+ return;
77623+}
77624+
77625+static void
77626+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
77627+ const ino_t newinode, const dev_t newdevice,
77628+ struct acl_subject_label *subj)
77629+{
77630+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
77631+ struct acl_object_label *match;
77632+
77633+ match = subj->obj_hash[index];
77634+
77635+ while (match && (match->inode != oldinode ||
77636+ match->device != olddevice ||
77637+ !(match->mode & GR_DELETED)))
77638+ match = match->next;
77639+
77640+ if (match && (match->inode == oldinode)
77641+ && (match->device == olddevice)
77642+ && (match->mode & GR_DELETED)) {
77643+ if (match->prev == NULL) {
77644+ subj->obj_hash[index] = match->next;
77645+ if (match->next != NULL)
77646+ match->next->prev = NULL;
77647+ } else {
77648+ match->prev->next = match->next;
77649+ if (match->next != NULL)
77650+ match->next->prev = match->prev;
77651+ }
77652+ match->prev = NULL;
77653+ match->next = NULL;
77654+ match->inode = newinode;
77655+ match->device = newdevice;
77656+ match->mode &= ~GR_DELETED;
77657+
77658+ insert_acl_obj_label(match, subj);
77659+ }
77660+
77661+ return;
77662+}
77663+
77664+static void
77665+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
77666+ const ino_t newinode, const dev_t newdevice,
77667+ struct acl_role_label *role)
77668+{
77669+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
77670+ struct acl_subject_label *match;
77671+
77672+ match = role->subj_hash[index];
77673+
77674+ while (match && (match->inode != oldinode ||
77675+ match->device != olddevice ||
77676+ !(match->mode & GR_DELETED)))
77677+ match = match->next;
77678+
77679+ if (match && (match->inode == oldinode)
77680+ && (match->device == olddevice)
77681+ && (match->mode & GR_DELETED)) {
77682+ if (match->prev == NULL) {
77683+ role->subj_hash[index] = match->next;
77684+ if (match->next != NULL)
77685+ match->next->prev = NULL;
77686+ } else {
77687+ match->prev->next = match->next;
77688+ if (match->next != NULL)
77689+ match->next->prev = match->prev;
77690+ }
77691+ match->prev = NULL;
77692+ match->next = NULL;
77693+ match->inode = newinode;
77694+ match->device = newdevice;
77695+ match->mode &= ~GR_DELETED;
77696+
77697+ insert_acl_subj_label(match, role);
77698+ }
77699+
77700+ return;
77701+}
77702+
77703+static void
77704+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
77705+ const ino_t newinode, const dev_t newdevice)
77706+{
77707+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
77708+ struct inodev_entry *match;
77709+
77710+ match = inodev_set.i_hash[index];
77711+
77712+ while (match && (match->nentry->inode != oldinode ||
77713+ match->nentry->device != olddevice || !match->nentry->deleted))
77714+ match = match->next;
77715+
77716+ if (match && (match->nentry->inode == oldinode)
77717+ && (match->nentry->device == olddevice) &&
77718+ match->nentry->deleted) {
77719+ if (match->prev == NULL) {
77720+ inodev_set.i_hash[index] = match->next;
77721+ if (match->next != NULL)
77722+ match->next->prev = NULL;
77723+ } else {
77724+ match->prev->next = match->next;
77725+ if (match->next != NULL)
77726+ match->next->prev = match->prev;
77727+ }
77728+ match->prev = NULL;
77729+ match->next = NULL;
77730+ match->nentry->inode = newinode;
77731+ match->nentry->device = newdevice;
77732+ match->nentry->deleted = 0;
77733+
77734+ insert_inodev_entry(match);
77735+ }
77736+
77737+ return;
77738+}
77739+
77740+static void
77741+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
77742+{
77743+ struct acl_subject_label *subj;
77744+ struct acl_role_label *role;
77745+ unsigned int x;
77746+
77747+ FOR_EACH_ROLE_START(role)
77748+ update_acl_subj_label(matchn->inode, matchn->device,
77749+ inode, dev, role);
77750+
77751+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77752+ if ((subj->inode == inode) && (subj->device == dev)) {
77753+ subj->inode = inode;
77754+ subj->device = dev;
77755+ }
77756+ FOR_EACH_NESTED_SUBJECT_END(subj)
77757+ FOR_EACH_SUBJECT_START(role, subj, x)
77758+ update_acl_obj_label(matchn->inode, matchn->device,
77759+ inode, dev, subj);
77760+ FOR_EACH_SUBJECT_END(subj,x)
77761+ FOR_EACH_ROLE_END(role)
77762+
77763+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
77764+
77765+ return;
77766+}
77767+
77768+static void
77769+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
77770+ const struct vfsmount *mnt)
77771+{
77772+ ino_t ino = dentry->d_inode->i_ino;
77773+ dev_t dev = __get_dev(dentry);
77774+
77775+ __do_handle_create(matchn, ino, dev);
77776+
77777+ return;
77778+}
77779+
77780+void
77781+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77782+{
77783+ struct name_entry *matchn;
77784+
77785+ if (unlikely(!(gr_status & GR_READY)))
77786+ return;
77787+
77788+ preempt_disable();
77789+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
77790+
77791+ if (unlikely((unsigned long)matchn)) {
77792+ write_lock(&gr_inode_lock);
77793+ do_handle_create(matchn, dentry, mnt);
77794+ write_unlock(&gr_inode_lock);
77795+ }
77796+ preempt_enable();
77797+
77798+ return;
77799+}
77800+
77801+void
77802+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77803+{
77804+ struct name_entry *matchn;
77805+
77806+ if (unlikely(!(gr_status & GR_READY)))
77807+ return;
77808+
77809+ preempt_disable();
77810+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
77811+
77812+ if (unlikely((unsigned long)matchn)) {
77813+ write_lock(&gr_inode_lock);
77814+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
77815+ write_unlock(&gr_inode_lock);
77816+ }
77817+ preempt_enable();
77818+
77819+ return;
77820+}
77821+
77822+void
77823+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77824+ struct dentry *old_dentry,
77825+ struct dentry *new_dentry,
77826+ struct vfsmount *mnt, const __u8 replace)
77827+{
77828+ struct name_entry *matchn;
77829+ struct inodev_entry *inodev;
77830+ struct inode *inode = new_dentry->d_inode;
77831+ ino_t oldinode = old_dentry->d_inode->i_ino;
77832+ dev_t olddev = __get_dev(old_dentry);
77833+
77834+ /* vfs_rename swaps the name and parent link for old_dentry and
77835+ new_dentry
77836+ at this point, old_dentry has the new name, parent link, and inode
77837+ for the renamed file
77838+ if a file is being replaced by a rename, new_dentry has the inode
77839+ and name for the replaced file
77840+ */
77841+
77842+ if (unlikely(!(gr_status & GR_READY)))
77843+ return;
77844+
77845+ preempt_disable();
77846+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
77847+
77848+ /* we wouldn't have to check d_inode if it weren't for
77849+ NFS silly-renaming
77850+ */
77851+
77852+ write_lock(&gr_inode_lock);
77853+ if (unlikely(replace && inode)) {
77854+ ino_t newinode = inode->i_ino;
77855+ dev_t newdev = __get_dev(new_dentry);
77856+ inodev = lookup_inodev_entry(newinode, newdev);
77857+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
77858+ do_handle_delete(inodev, newinode, newdev);
77859+ }
77860+
77861+ inodev = lookup_inodev_entry(oldinode, olddev);
77862+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
77863+ do_handle_delete(inodev, oldinode, olddev);
77864+
77865+ if (unlikely((unsigned long)matchn))
77866+ do_handle_create(matchn, old_dentry, mnt);
77867+
77868+ write_unlock(&gr_inode_lock);
77869+ preempt_enable();
77870+
77871+ return;
77872+}
77873+
77874+static int
77875+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
77876+ unsigned char **sum)
77877+{
77878+ struct acl_role_label *r;
77879+ struct role_allowed_ip *ipp;
77880+ struct role_transition *trans;
77881+ unsigned int i;
77882+ int found = 0;
77883+ u32 curr_ip = current->signal->curr_ip;
77884+
77885+ current->signal->saved_ip = curr_ip;
77886+
77887+ /* check transition table */
77888+
77889+ for (trans = current->role->transitions; trans; trans = trans->next) {
77890+ if (!strcmp(rolename, trans->rolename)) {
77891+ found = 1;
77892+ break;
77893+ }
77894+ }
77895+
77896+ if (!found)
77897+ return 0;
77898+
77899+ /* handle special roles that do not require authentication
77900+ and check ip */
77901+
77902+ FOR_EACH_ROLE_START(r)
77903+ if (!strcmp(rolename, r->rolename) &&
77904+ (r->roletype & GR_ROLE_SPECIAL)) {
77905+ found = 0;
77906+ if (r->allowed_ips != NULL) {
77907+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
77908+ if ((ntohl(curr_ip) & ipp->netmask) ==
77909+ (ntohl(ipp->addr) & ipp->netmask))
77910+ found = 1;
77911+ }
77912+ } else
77913+ found = 2;
77914+ if (!found)
77915+ return 0;
77916+
77917+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
77918+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
77919+ *salt = NULL;
77920+ *sum = NULL;
77921+ return 1;
77922+ }
77923+ }
77924+ FOR_EACH_ROLE_END(r)
77925+
77926+ for (i = 0; i < num_sprole_pws; i++) {
77927+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
77928+ *salt = acl_special_roles[i]->salt;
77929+ *sum = acl_special_roles[i]->sum;
77930+ return 1;
77931+ }
77932+ }
77933+
77934+ return 0;
77935+}
77936+
77937+static void
77938+assign_special_role(char *rolename)
77939+{
77940+ struct acl_object_label *obj;
77941+ struct acl_role_label *r;
77942+ struct acl_role_label *assigned = NULL;
77943+ struct task_struct *tsk;
77944+ struct file *filp;
77945+
77946+ FOR_EACH_ROLE_START(r)
77947+ if (!strcmp(rolename, r->rolename) &&
77948+ (r->roletype & GR_ROLE_SPECIAL)) {
77949+ assigned = r;
77950+ break;
77951+ }
77952+ FOR_EACH_ROLE_END(r)
77953+
77954+ if (!assigned)
77955+ return;
77956+
77957+ read_lock(&tasklist_lock);
77958+ read_lock(&grsec_exec_file_lock);
77959+
77960+ tsk = current->real_parent;
77961+ if (tsk == NULL)
77962+ goto out_unlock;
77963+
77964+ filp = tsk->exec_file;
77965+ if (filp == NULL)
77966+ goto out_unlock;
77967+
77968+ tsk->is_writable = 0;
77969+
77970+ tsk->acl_sp_role = 1;
77971+ tsk->acl_role_id = ++acl_sp_role_value;
77972+ tsk->role = assigned;
77973+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
77974+
77975+ /* ignore additional mmap checks for processes that are writable
77976+ by the default ACL */
77977+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77978+ if (unlikely(obj->mode & GR_WRITE))
77979+ tsk->is_writable = 1;
77980+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
77981+ if (unlikely(obj->mode & GR_WRITE))
77982+ tsk->is_writable = 1;
77983+
77984+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77985+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
77986+#endif
77987+
77988+out_unlock:
77989+ read_unlock(&grsec_exec_file_lock);
77990+ read_unlock(&tasklist_lock);
77991+ return;
77992+}
77993+
77994+int gr_check_secure_terminal(struct task_struct *task)
77995+{
77996+ struct task_struct *p, *p2, *p3;
77997+ struct files_struct *files;
77998+ struct fdtable *fdt;
77999+ struct file *our_file = NULL, *file;
78000+ int i;
78001+
78002+ if (task->signal->tty == NULL)
78003+ return 1;
78004+
78005+ files = get_files_struct(task);
78006+ if (files != NULL) {
78007+ rcu_read_lock();
78008+ fdt = files_fdtable(files);
78009+ for (i=0; i < fdt->max_fds; i++) {
78010+ file = fcheck_files(files, i);
78011+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
78012+ get_file(file);
78013+ our_file = file;
78014+ }
78015+ }
78016+ rcu_read_unlock();
78017+ put_files_struct(files);
78018+ }
78019+
78020+ if (our_file == NULL)
78021+ return 1;
78022+
78023+ read_lock(&tasklist_lock);
78024+ do_each_thread(p2, p) {
78025+ files = get_files_struct(p);
78026+ if (files == NULL ||
78027+ (p->signal && p->signal->tty == task->signal->tty)) {
78028+ if (files != NULL)
78029+ put_files_struct(files);
78030+ continue;
78031+ }
78032+ rcu_read_lock();
78033+ fdt = files_fdtable(files);
78034+ for (i=0; i < fdt->max_fds; i++) {
78035+ file = fcheck_files(files, i);
78036+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
78037+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
78038+ p3 = task;
78039+ while (p3->pid > 0) {
78040+ if (p3 == p)
78041+ break;
78042+ p3 = p3->real_parent;
78043+ }
78044+ if (p3 == p)
78045+ break;
78046+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
78047+ gr_handle_alertkill(p);
78048+ rcu_read_unlock();
78049+ put_files_struct(files);
78050+ read_unlock(&tasklist_lock);
78051+ fput(our_file);
78052+ return 0;
78053+ }
78054+ }
78055+ rcu_read_unlock();
78056+ put_files_struct(files);
78057+ } while_each_thread(p2, p);
78058+ read_unlock(&tasklist_lock);
78059+
78060+ fput(our_file);
78061+ return 1;
78062+}
78063+
78064+ssize_t
78065+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
78066+{
78067+ struct gr_arg_wrapper uwrap;
78068+ unsigned char *sprole_salt = NULL;
78069+ unsigned char *sprole_sum = NULL;
78070+ int error = sizeof (struct gr_arg_wrapper);
78071+ int error2 = 0;
78072+
78073+ mutex_lock(&gr_dev_mutex);
78074+
78075+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
78076+ error = -EPERM;
78077+ goto out;
78078+ }
78079+
78080+ if (count != sizeof (struct gr_arg_wrapper)) {
78081+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
78082+ error = -EINVAL;
78083+ goto out;
78084+ }
78085+
78086+
78087+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
78088+ gr_auth_expires = 0;
78089+ gr_auth_attempts = 0;
78090+ }
78091+
78092+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
78093+ error = -EFAULT;
78094+ goto out;
78095+ }
78096+
78097+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
78098+ error = -EINVAL;
78099+ goto out;
78100+ }
78101+
78102+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
78103+ error = -EFAULT;
78104+ goto out;
78105+ }
78106+
78107+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
78108+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78109+ time_after(gr_auth_expires, get_seconds())) {
78110+ error = -EBUSY;
78111+ goto out;
78112+ }
78113+
78114+ /* if non-root trying to do anything other than use a special role,
78115+ do not attempt authentication, do not count towards authentication
78116+ locking
78117+ */
78118+
78119+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
78120+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
78121+ current_uid()) {
78122+ error = -EPERM;
78123+ goto out;
78124+ }
78125+
78126+ /* ensure pw and special role name are null terminated */
78127+
78128+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
78129+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
78130+
78131+ /* Okay.
78132+ * We have our enough of the argument structure..(we have yet
78133+ * to copy_from_user the tables themselves) . Copy the tables
78134+ * only if we need them, i.e. for loading operations. */
78135+
78136+ switch (gr_usermode->mode) {
78137+ case GR_STATUS:
78138+ if (gr_status & GR_READY) {
78139+ error = 1;
78140+ if (!gr_check_secure_terminal(current))
78141+ error = 3;
78142+ } else
78143+ error = 2;
78144+ goto out;
78145+ case GR_SHUTDOWN:
78146+ if ((gr_status & GR_READY)
78147+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78148+ pax_open_kernel();
78149+ gr_status &= ~GR_READY;
78150+ pax_close_kernel();
78151+
78152+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
78153+ free_variables();
78154+ memset(gr_usermode, 0, sizeof (struct gr_arg));
78155+ memset(gr_system_salt, 0, GR_SALT_LEN);
78156+ memset(gr_system_sum, 0, GR_SHA_LEN);
78157+ } else if (gr_status & GR_READY) {
78158+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
78159+ error = -EPERM;
78160+ } else {
78161+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
78162+ error = -EAGAIN;
78163+ }
78164+ break;
78165+ case GR_ENABLE:
78166+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
78167+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
78168+ else {
78169+ if (gr_status & GR_READY)
78170+ error = -EAGAIN;
78171+ else
78172+ error = error2;
78173+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
78174+ }
78175+ break;
78176+ case GR_RELOAD:
78177+ if (!(gr_status & GR_READY)) {
78178+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
78179+ error = -EAGAIN;
78180+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78181+ lock_kernel();
78182+
78183+ pax_open_kernel();
78184+ gr_status &= ~GR_READY;
78185+ pax_close_kernel();
78186+
78187+ free_variables();
78188+ if (!(error2 = gracl_init(gr_usermode))) {
78189+ unlock_kernel();
78190+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
78191+ } else {
78192+ unlock_kernel();
78193+ error = error2;
78194+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78195+ }
78196+ } else {
78197+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78198+ error = -EPERM;
78199+ }
78200+ break;
78201+ case GR_SEGVMOD:
78202+ if (unlikely(!(gr_status & GR_READY))) {
78203+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
78204+ error = -EAGAIN;
78205+ break;
78206+ }
78207+
78208+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78209+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
78210+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
78211+ struct acl_subject_label *segvacl;
78212+ segvacl =
78213+ lookup_acl_subj_label(gr_usermode->segv_inode,
78214+ gr_usermode->segv_device,
78215+ current->role);
78216+ if (segvacl) {
78217+ segvacl->crashes = 0;
78218+ segvacl->expires = 0;
78219+ }
78220+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
78221+ gr_remove_uid(gr_usermode->segv_uid);
78222+ }
78223+ } else {
78224+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
78225+ error = -EPERM;
78226+ }
78227+ break;
78228+ case GR_SPROLE:
78229+ case GR_SPROLEPAM:
78230+ if (unlikely(!(gr_status & GR_READY))) {
78231+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
78232+ error = -EAGAIN;
78233+ break;
78234+ }
78235+
78236+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
78237+ current->role->expires = 0;
78238+ current->role->auth_attempts = 0;
78239+ }
78240+
78241+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78242+ time_after(current->role->expires, get_seconds())) {
78243+ error = -EBUSY;
78244+ goto out;
78245+ }
78246+
78247+ if (lookup_special_role_auth
78248+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
78249+ && ((!sprole_salt && !sprole_sum)
78250+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
78251+ char *p = "";
78252+ assign_special_role(gr_usermode->sp_role);
78253+ read_lock(&tasklist_lock);
78254+ if (current->real_parent)
78255+ p = current->real_parent->role->rolename;
78256+ read_unlock(&tasklist_lock);
78257+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
78258+ p, acl_sp_role_value);
78259+ } else {
78260+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
78261+ error = -EPERM;
78262+ if(!(current->role->auth_attempts++))
78263+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78264+
78265+ goto out;
78266+ }
78267+ break;
78268+ case GR_UNSPROLE:
78269+ if (unlikely(!(gr_status & GR_READY))) {
78270+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
78271+ error = -EAGAIN;
78272+ break;
78273+ }
78274+
78275+ if (current->role->roletype & GR_ROLE_SPECIAL) {
78276+ char *p = "";
78277+ int i = 0;
78278+
78279+ read_lock(&tasklist_lock);
78280+ if (current->real_parent) {
78281+ p = current->real_parent->role->rolename;
78282+ i = current->real_parent->acl_role_id;
78283+ }
78284+ read_unlock(&tasklist_lock);
78285+
78286+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
78287+ gr_set_acls(1);
78288+ } else {
78289+ error = -EPERM;
78290+ goto out;
78291+ }
78292+ break;
78293+ default:
78294+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
78295+ error = -EINVAL;
78296+ break;
78297+ }
78298+
78299+ if (error != -EPERM)
78300+ goto out;
78301+
78302+ if(!(gr_auth_attempts++))
78303+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78304+
78305+ out:
78306+ mutex_unlock(&gr_dev_mutex);
78307+ return error;
78308+}
78309+
78310+/* must be called with
78311+ rcu_read_lock();
78312+ read_lock(&tasklist_lock);
78313+ read_lock(&grsec_exec_file_lock);
78314+*/
78315+int gr_apply_subject_to_task(struct task_struct *task)
78316+{
78317+ struct acl_object_label *obj;
78318+ char *tmpname;
78319+ struct acl_subject_label *tmpsubj;
78320+ struct file *filp;
78321+ struct name_entry *nmatch;
78322+
78323+ filp = task->exec_file;
78324+ if (filp == NULL)
78325+ return 0;
78326+
78327+ /* the following is to apply the correct subject
78328+ on binaries running when the RBAC system
78329+ is enabled, when the binaries have been
78330+ replaced or deleted since their execution
78331+ -----
78332+ when the RBAC system starts, the inode/dev
78333+ from exec_file will be one the RBAC system
78334+ is unaware of. It only knows the inode/dev
78335+ of the present file on disk, or the absence
78336+ of it.
78337+ */
78338+ preempt_disable();
78339+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78340+
78341+ nmatch = lookup_name_entry(tmpname);
78342+ preempt_enable();
78343+ tmpsubj = NULL;
78344+ if (nmatch) {
78345+ if (nmatch->deleted)
78346+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78347+ else
78348+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78349+ if (tmpsubj != NULL)
78350+ task->acl = tmpsubj;
78351+ }
78352+ if (tmpsubj == NULL)
78353+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
78354+ task->role);
78355+ if (task->acl) {
78356+ task->is_writable = 0;
78357+ /* ignore additional mmap checks for processes that are writable
78358+ by the default ACL */
78359+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78360+ if (unlikely(obj->mode & GR_WRITE))
78361+ task->is_writable = 1;
78362+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78363+ if (unlikely(obj->mode & GR_WRITE))
78364+ task->is_writable = 1;
78365+
78366+ gr_set_proc_res(task);
78367+
78368+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78369+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
78370+#endif
78371+ } else {
78372+ return 1;
78373+ }
78374+
78375+ return 0;
78376+}
78377+
78378+int
78379+gr_set_acls(const int type)
78380+{
78381+ struct task_struct *task, *task2;
78382+ struct acl_role_label *role = current->role;
78383+ __u16 acl_role_id = current->acl_role_id;
78384+ const struct cred *cred;
78385+ int ret;
78386+
78387+ rcu_read_lock();
78388+ read_lock(&tasklist_lock);
78389+ read_lock(&grsec_exec_file_lock);
78390+ do_each_thread(task2, task) {
78391+ /* check to see if we're called from the exit handler,
78392+ if so, only replace ACLs that have inherited the admin
78393+ ACL */
78394+
78395+ if (type && (task->role != role ||
78396+ task->acl_role_id != acl_role_id))
78397+ continue;
78398+
78399+ task->acl_role_id = 0;
78400+ task->acl_sp_role = 0;
78401+
78402+ if (task->exec_file) {
78403+ cred = __task_cred(task);
78404+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
78405+
78406+ ret = gr_apply_subject_to_task(task);
78407+ if (ret) {
78408+ read_unlock(&grsec_exec_file_lock);
78409+ read_unlock(&tasklist_lock);
78410+ rcu_read_unlock();
78411+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
78412+ return ret;
78413+ }
78414+ } else {
78415+ // it's a kernel process
78416+ task->role = kernel_role;
78417+ task->acl = kernel_role->root_label;
78418+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
78419+ task->acl->mode &= ~GR_PROCFIND;
78420+#endif
78421+ }
78422+ } while_each_thread(task2, task);
78423+ read_unlock(&grsec_exec_file_lock);
78424+ read_unlock(&tasklist_lock);
78425+ rcu_read_unlock();
78426+
78427+ return 0;
78428+}
78429+
78430+void
78431+gr_learn_resource(const struct task_struct *task,
78432+ const int res, const unsigned long wanted, const int gt)
78433+{
78434+ struct acl_subject_label *acl;
78435+ const struct cred *cred;
78436+
78437+ if (unlikely((gr_status & GR_READY) &&
78438+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
78439+ goto skip_reslog;
78440+
78441+#ifdef CONFIG_GRKERNSEC_RESLOG
78442+ gr_log_resource(task, res, wanted, gt);
78443+#endif
78444+ skip_reslog:
78445+
78446+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
78447+ return;
78448+
78449+ acl = task->acl;
78450+
78451+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
78452+ !(acl->resmask & (1 << (unsigned short) res))))
78453+ return;
78454+
78455+ if (wanted >= acl->res[res].rlim_cur) {
78456+ unsigned long res_add;
78457+
78458+ res_add = wanted;
78459+ switch (res) {
78460+ case RLIMIT_CPU:
78461+ res_add += GR_RLIM_CPU_BUMP;
78462+ break;
78463+ case RLIMIT_FSIZE:
78464+ res_add += GR_RLIM_FSIZE_BUMP;
78465+ break;
78466+ case RLIMIT_DATA:
78467+ res_add += GR_RLIM_DATA_BUMP;
78468+ break;
78469+ case RLIMIT_STACK:
78470+ res_add += GR_RLIM_STACK_BUMP;
78471+ break;
78472+ case RLIMIT_CORE:
78473+ res_add += GR_RLIM_CORE_BUMP;
78474+ break;
78475+ case RLIMIT_RSS:
78476+ res_add += GR_RLIM_RSS_BUMP;
78477+ break;
78478+ case RLIMIT_NPROC:
78479+ res_add += GR_RLIM_NPROC_BUMP;
78480+ break;
78481+ case RLIMIT_NOFILE:
78482+ res_add += GR_RLIM_NOFILE_BUMP;
78483+ break;
78484+ case RLIMIT_MEMLOCK:
78485+ res_add += GR_RLIM_MEMLOCK_BUMP;
78486+ break;
78487+ case RLIMIT_AS:
78488+ res_add += GR_RLIM_AS_BUMP;
78489+ break;
78490+ case RLIMIT_LOCKS:
78491+ res_add += GR_RLIM_LOCKS_BUMP;
78492+ break;
78493+ case RLIMIT_SIGPENDING:
78494+ res_add += GR_RLIM_SIGPENDING_BUMP;
78495+ break;
78496+ case RLIMIT_MSGQUEUE:
78497+ res_add += GR_RLIM_MSGQUEUE_BUMP;
78498+ break;
78499+ case RLIMIT_NICE:
78500+ res_add += GR_RLIM_NICE_BUMP;
78501+ break;
78502+ case RLIMIT_RTPRIO:
78503+ res_add += GR_RLIM_RTPRIO_BUMP;
78504+ break;
78505+ case RLIMIT_RTTIME:
78506+ res_add += GR_RLIM_RTTIME_BUMP;
78507+ break;
78508+ }
78509+
78510+ acl->res[res].rlim_cur = res_add;
78511+
78512+ if (wanted > acl->res[res].rlim_max)
78513+ acl->res[res].rlim_max = res_add;
78514+
78515+ /* only log the subject filename, since resource logging is supported for
78516+ single-subject learning only */
78517+ rcu_read_lock();
78518+ cred = __task_cred(task);
78519+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
78520+ task->role->roletype, cred->uid, cred->gid, acl->filename,
78521+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
78522+ "", (unsigned long) res, &task->signal->saved_ip);
78523+ rcu_read_unlock();
78524+ }
78525+
78526+ return;
78527+}
78528+
78529+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
78530+void
78531+pax_set_initial_flags(struct linux_binprm *bprm)
78532+{
78533+ struct task_struct *task = current;
78534+ struct acl_subject_label *proc;
78535+ unsigned long flags;
78536+
78537+ if (unlikely(!(gr_status & GR_READY)))
78538+ return;
78539+
78540+ flags = pax_get_flags(task);
78541+
78542+ proc = task->acl;
78543+
78544+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
78545+ flags &= ~MF_PAX_PAGEEXEC;
78546+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
78547+ flags &= ~MF_PAX_SEGMEXEC;
78548+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
78549+ flags &= ~MF_PAX_RANDMMAP;
78550+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
78551+ flags &= ~MF_PAX_EMUTRAMP;
78552+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
78553+ flags &= ~MF_PAX_MPROTECT;
78554+
78555+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
78556+ flags |= MF_PAX_PAGEEXEC;
78557+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
78558+ flags |= MF_PAX_SEGMEXEC;
78559+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
78560+ flags |= MF_PAX_RANDMMAP;
78561+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
78562+ flags |= MF_PAX_EMUTRAMP;
78563+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
78564+ flags |= MF_PAX_MPROTECT;
78565+
78566+ pax_set_flags(task, flags);
78567+
78568+ return;
78569+}
78570+#endif
78571+
78572+#ifdef CONFIG_SYSCTL
78573+/* Eric Biederman likes breaking userland ABI and every inode-based security
78574+ system to save 35kb of memory */
78575+
78576+/* we modify the passed in filename, but adjust it back before returning */
78577+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
78578+{
78579+ struct name_entry *nmatch;
78580+ char *p, *lastp = NULL;
78581+ struct acl_object_label *obj = NULL, *tmp;
78582+ struct acl_subject_label *tmpsubj;
78583+ char c = '\0';
78584+
78585+ read_lock(&gr_inode_lock);
78586+
78587+ p = name + len - 1;
78588+ do {
78589+ nmatch = lookup_name_entry(name);
78590+ if (lastp != NULL)
78591+ *lastp = c;
78592+
78593+ if (nmatch == NULL)
78594+ goto next_component;
78595+ tmpsubj = current->acl;
78596+ do {
78597+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
78598+ if (obj != NULL) {
78599+ tmp = obj->globbed;
78600+ while (tmp) {
78601+ if (!glob_match(tmp->filename, name)) {
78602+ obj = tmp;
78603+ goto found_obj;
78604+ }
78605+ tmp = tmp->next;
78606+ }
78607+ goto found_obj;
78608+ }
78609+ } while ((tmpsubj = tmpsubj->parent_subject));
78610+next_component:
78611+ /* end case */
78612+ if (p == name)
78613+ break;
78614+
78615+ while (*p != '/')
78616+ p--;
78617+ if (p == name)
78618+ lastp = p + 1;
78619+ else {
78620+ lastp = p;
78621+ p--;
78622+ }
78623+ c = *lastp;
78624+ *lastp = '\0';
78625+ } while (1);
78626+found_obj:
78627+ read_unlock(&gr_inode_lock);
78628+ /* obj returned will always be non-null */
78629+ return obj;
78630+}
78631+
78632+/* returns 0 when allowing, non-zero on error
78633+ op of 0 is used for readdir, so we don't log the names of hidden files
78634+*/
78635+__u32
78636+gr_handle_sysctl(const struct ctl_table *table, const int op)
78637+{
78638+ ctl_table *tmp;
78639+ const char *proc_sys = "/proc/sys";
78640+ char *path;
78641+ struct acl_object_label *obj;
78642+ unsigned short len = 0, pos = 0, depth = 0, i;
78643+ __u32 err = 0;
78644+ __u32 mode = 0;
78645+
78646+ if (unlikely(!(gr_status & GR_READY)))
78647+ return 0;
78648+
78649+ /* for now, ignore operations on non-sysctl entries if it's not a
78650+ readdir*/
78651+ if (table->child != NULL && op != 0)
78652+ return 0;
78653+
78654+ mode |= GR_FIND;
78655+ /* it's only a read if it's an entry, read on dirs is for readdir */
78656+ if (op & MAY_READ)
78657+ mode |= GR_READ;
78658+ if (op & MAY_WRITE)
78659+ mode |= GR_WRITE;
78660+
78661+ preempt_disable();
78662+
78663+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
78664+
78665+ /* it's only a read/write if it's an actual entry, not a dir
78666+ (which are opened for readdir)
78667+ */
78668+
78669+ /* convert the requested sysctl entry into a pathname */
78670+
78671+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78672+ len += strlen(tmp->procname);
78673+ len++;
78674+ depth++;
78675+ }
78676+
78677+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
78678+ /* deny */
78679+ goto out;
78680+ }
78681+
78682+ memset(path, 0, PAGE_SIZE);
78683+
78684+ memcpy(path, proc_sys, strlen(proc_sys));
78685+
78686+ pos += strlen(proc_sys);
78687+
78688+ for (; depth > 0; depth--) {
78689+ path[pos] = '/';
78690+ pos++;
78691+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78692+ if (depth == i) {
78693+ memcpy(path + pos, tmp->procname,
78694+ strlen(tmp->procname));
78695+ pos += strlen(tmp->procname);
78696+ }
78697+ i++;
78698+ }
78699+ }
78700+
78701+ obj = gr_lookup_by_name(path, pos);
78702+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
78703+
78704+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
78705+ ((err & mode) != mode))) {
78706+ __u32 new_mode = mode;
78707+
78708+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78709+
78710+ err = 0;
78711+ gr_log_learn_sysctl(path, new_mode);
78712+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
78713+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
78714+ err = -ENOENT;
78715+ } else if (!(err & GR_FIND)) {
78716+ err = -ENOENT;
78717+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
78718+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
78719+ path, (mode & GR_READ) ? " reading" : "",
78720+ (mode & GR_WRITE) ? " writing" : "");
78721+ err = -EACCES;
78722+ } else if ((err & mode) != mode) {
78723+ err = -EACCES;
78724+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
78725+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
78726+ path, (mode & GR_READ) ? " reading" : "",
78727+ (mode & GR_WRITE) ? " writing" : "");
78728+ err = 0;
78729+ } else
78730+ err = 0;
78731+
78732+ out:
78733+ preempt_enable();
78734+
78735+ return err;
78736+}
78737+#endif
78738+
78739+int
78740+gr_handle_proc_ptrace(struct task_struct *task)
78741+{
78742+ struct file *filp;
78743+ struct task_struct *tmp = task;
78744+ struct task_struct *curtemp = current;
78745+ __u32 retmode;
78746+
78747+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78748+ if (unlikely(!(gr_status & GR_READY)))
78749+ return 0;
78750+#endif
78751+
78752+ read_lock(&tasklist_lock);
78753+ read_lock(&grsec_exec_file_lock);
78754+ filp = task->exec_file;
78755+
78756+ while (tmp->pid > 0) {
78757+ if (tmp == curtemp)
78758+ break;
78759+ tmp = tmp->real_parent;
78760+ }
78761+
78762+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78763+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
78764+ read_unlock(&grsec_exec_file_lock);
78765+ read_unlock(&tasklist_lock);
78766+ return 1;
78767+ }
78768+
78769+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78770+ if (!(gr_status & GR_READY)) {
78771+ read_unlock(&grsec_exec_file_lock);
78772+ read_unlock(&tasklist_lock);
78773+ return 0;
78774+ }
78775+#endif
78776+
78777+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
78778+ read_unlock(&grsec_exec_file_lock);
78779+ read_unlock(&tasklist_lock);
78780+
78781+ if (retmode & GR_NOPTRACE)
78782+ return 1;
78783+
78784+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
78785+ && (current->acl != task->acl || (current->acl != current->role->root_label
78786+ && current->pid != task->pid)))
78787+ return 1;
78788+
78789+ return 0;
78790+}
78791+
78792+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
78793+{
78794+ if (unlikely(!(gr_status & GR_READY)))
78795+ return;
78796+
78797+ if (!(current->role->roletype & GR_ROLE_GOD))
78798+ return;
78799+
78800+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
78801+ p->role->rolename, gr_task_roletype_to_char(p),
78802+ p->acl->filename);
78803+}
78804+
78805+int
78806+gr_handle_ptrace(struct task_struct *task, const long request)
78807+{
78808+ struct task_struct *tmp = task;
78809+ struct task_struct *curtemp = current;
78810+ __u32 retmode;
78811+
78812+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78813+ if (unlikely(!(gr_status & GR_READY)))
78814+ return 0;
78815+#endif
78816+
78817+ read_lock(&tasklist_lock);
78818+ while (tmp->pid > 0) {
78819+ if (tmp == curtemp)
78820+ break;
78821+ tmp = tmp->real_parent;
78822+ }
78823+
78824+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78825+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
78826+ read_unlock(&tasklist_lock);
78827+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78828+ return 1;
78829+ }
78830+ read_unlock(&tasklist_lock);
78831+
78832+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78833+ if (!(gr_status & GR_READY))
78834+ return 0;
78835+#endif
78836+
78837+ read_lock(&grsec_exec_file_lock);
78838+ if (unlikely(!task->exec_file)) {
78839+ read_unlock(&grsec_exec_file_lock);
78840+ return 0;
78841+ }
78842+
78843+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
78844+ read_unlock(&grsec_exec_file_lock);
78845+
78846+ if (retmode & GR_NOPTRACE) {
78847+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78848+ return 1;
78849+ }
78850+
78851+ if (retmode & GR_PTRACERD) {
78852+ switch (request) {
78853+ case PTRACE_POKETEXT:
78854+ case PTRACE_POKEDATA:
78855+ case PTRACE_POKEUSR:
78856+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
78857+ case PTRACE_SETREGS:
78858+ case PTRACE_SETFPREGS:
78859+#endif
78860+#ifdef CONFIG_X86
78861+ case PTRACE_SETFPXREGS:
78862+#endif
78863+#ifdef CONFIG_ALTIVEC
78864+ case PTRACE_SETVRREGS:
78865+#endif
78866+ return 1;
78867+ default:
78868+ return 0;
78869+ }
78870+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
78871+ !(current->role->roletype & GR_ROLE_GOD) &&
78872+ (current->acl != task->acl)) {
78873+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78874+ return 1;
78875+ }
78876+
78877+ return 0;
78878+}
78879+
78880+static int is_writable_mmap(const struct file *filp)
78881+{
78882+ struct task_struct *task = current;
78883+ struct acl_object_label *obj, *obj2;
78884+
78885+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
78886+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
78887+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78888+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
78889+ task->role->root_label);
78890+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
78891+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
78892+ return 1;
78893+ }
78894+ }
78895+ return 0;
78896+}
78897+
78898+int
78899+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
78900+{
78901+ __u32 mode;
78902+
78903+ if (unlikely(!file || !(prot & PROT_EXEC)))
78904+ return 1;
78905+
78906+ if (is_writable_mmap(file))
78907+ return 0;
78908+
78909+ mode =
78910+ gr_search_file(file->f_path.dentry,
78911+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78912+ file->f_path.mnt);
78913+
78914+ if (!gr_tpe_allow(file))
78915+ return 0;
78916+
78917+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78918+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78919+ return 0;
78920+ } else if (unlikely(!(mode & GR_EXEC))) {
78921+ return 0;
78922+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78923+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78924+ return 1;
78925+ }
78926+
78927+ return 1;
78928+}
78929+
78930+int
78931+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78932+{
78933+ __u32 mode;
78934+
78935+ if (unlikely(!file || !(prot & PROT_EXEC)))
78936+ return 1;
78937+
78938+ if (is_writable_mmap(file))
78939+ return 0;
78940+
78941+ mode =
78942+ gr_search_file(file->f_path.dentry,
78943+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78944+ file->f_path.mnt);
78945+
78946+ if (!gr_tpe_allow(file))
78947+ return 0;
78948+
78949+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78950+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78951+ return 0;
78952+ } else if (unlikely(!(mode & GR_EXEC))) {
78953+ return 0;
78954+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78955+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78956+ return 1;
78957+ }
78958+
78959+ return 1;
78960+}
78961+
78962+void
78963+gr_acl_handle_psacct(struct task_struct *task, const long code)
78964+{
78965+ unsigned long runtime;
78966+ unsigned long cputime;
78967+ unsigned int wday, cday;
78968+ __u8 whr, chr;
78969+ __u8 wmin, cmin;
78970+ __u8 wsec, csec;
78971+ struct timespec timeval;
78972+
78973+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
78974+ !(task->acl->mode & GR_PROCACCT)))
78975+ return;
78976+
78977+ do_posix_clock_monotonic_gettime(&timeval);
78978+ runtime = timeval.tv_sec - task->start_time.tv_sec;
78979+ wday = runtime / (3600 * 24);
78980+ runtime -= wday * (3600 * 24);
78981+ whr = runtime / 3600;
78982+ runtime -= whr * 3600;
78983+ wmin = runtime / 60;
78984+ runtime -= wmin * 60;
78985+ wsec = runtime;
78986+
78987+ cputime = (task->utime + task->stime) / HZ;
78988+ cday = cputime / (3600 * 24);
78989+ cputime -= cday * (3600 * 24);
78990+ chr = cputime / 3600;
78991+ cputime -= chr * 3600;
78992+ cmin = cputime / 60;
78993+ cputime -= cmin * 60;
78994+ csec = cputime;
78995+
78996+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
78997+
78998+ return;
78999+}
79000+
79001+void gr_set_kernel_label(struct task_struct *task)
79002+{
79003+ if (gr_status & GR_READY) {
79004+ task->role = kernel_role;
79005+ task->acl = kernel_role->root_label;
79006+ }
79007+ return;
79008+}
79009+
79010+#ifdef CONFIG_TASKSTATS
79011+int gr_is_taskstats_denied(int pid)
79012+{
79013+ struct task_struct *task;
79014+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79015+ const struct cred *cred;
79016+#endif
79017+ int ret = 0;
79018+
79019+ /* restrict taskstats viewing to un-chrooted root users
79020+ who have the 'view' subject flag if the RBAC system is enabled
79021+ */
79022+
79023+ rcu_read_lock();
79024+ read_lock(&tasklist_lock);
79025+ task = find_task_by_vpid(pid);
79026+ if (task) {
79027+#ifdef CONFIG_GRKERNSEC_CHROOT
79028+ if (proc_is_chrooted(task))
79029+ ret = -EACCES;
79030+#endif
79031+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79032+ cred = __task_cred(task);
79033+#ifdef CONFIG_GRKERNSEC_PROC_USER
79034+ if (cred->uid != 0)
79035+ ret = -EACCES;
79036+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79037+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
79038+ ret = -EACCES;
79039+#endif
79040+#endif
79041+ if (gr_status & GR_READY) {
79042+ if (!(task->acl->mode & GR_VIEW))
79043+ ret = -EACCES;
79044+ }
79045+ } else
79046+ ret = -ENOENT;
79047+
79048+ read_unlock(&tasklist_lock);
79049+ rcu_read_unlock();
79050+
79051+ return ret;
79052+}
79053+#endif
79054+
79055+/* AUXV entries are filled via a descendant of search_binary_handler
79056+ after we've already applied the subject for the target
79057+*/
79058+int gr_acl_enable_at_secure(void)
79059+{
79060+ if (unlikely(!(gr_status & GR_READY)))
79061+ return 0;
79062+
79063+ if (current->acl->mode & GR_ATSECURE)
79064+ return 1;
79065+
79066+ return 0;
79067+}
79068+
79069+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
79070+{
79071+ struct task_struct *task = current;
79072+ struct dentry *dentry = file->f_path.dentry;
79073+ struct vfsmount *mnt = file->f_path.mnt;
79074+ struct acl_object_label *obj, *tmp;
79075+ struct acl_subject_label *subj;
79076+ unsigned int bufsize;
79077+ int is_not_root;
79078+ char *path;
79079+ dev_t dev = __get_dev(dentry);
79080+
79081+ if (unlikely(!(gr_status & GR_READY)))
79082+ return 1;
79083+
79084+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79085+ return 1;
79086+
79087+ /* ignore Eric Biederman */
79088+ if (IS_PRIVATE(dentry->d_inode))
79089+ return 1;
79090+
79091+ subj = task->acl;
79092+ do {
79093+ obj = lookup_acl_obj_label(ino, dev, subj);
79094+ if (obj != NULL)
79095+ return (obj->mode & GR_FIND) ? 1 : 0;
79096+ } while ((subj = subj->parent_subject));
79097+
79098+ /* this is purely an optimization since we're looking for an object
79099+ for the directory we're doing a readdir on
79100+ if it's possible for any globbed object to match the entry we're
79101+ filling into the directory, then the object we find here will be
79102+ an anchor point with attached globbed objects
79103+ */
79104+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
79105+ if (obj->globbed == NULL)
79106+ return (obj->mode & GR_FIND) ? 1 : 0;
79107+
79108+ is_not_root = ((obj->filename[0] == '/') &&
79109+ (obj->filename[1] == '\0')) ? 0 : 1;
79110+ bufsize = PAGE_SIZE - namelen - is_not_root;
79111+
79112+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
79113+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
79114+ return 1;
79115+
79116+ preempt_disable();
79117+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
79118+ bufsize);
79119+
79120+ bufsize = strlen(path);
79121+
79122+ /* if base is "/", don't append an additional slash */
79123+ if (is_not_root)
79124+ *(path + bufsize) = '/';
79125+ memcpy(path + bufsize + is_not_root, name, namelen);
79126+ *(path + bufsize + namelen + is_not_root) = '\0';
79127+
79128+ tmp = obj->globbed;
79129+ while (tmp) {
79130+ if (!glob_match(tmp->filename, path)) {
79131+ preempt_enable();
79132+ return (tmp->mode & GR_FIND) ? 1 : 0;
79133+ }
79134+ tmp = tmp->next;
79135+ }
79136+ preempt_enable();
79137+ return (obj->mode & GR_FIND) ? 1 : 0;
79138+}
79139+
79140+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
79141+EXPORT_SYMBOL(gr_acl_is_enabled);
79142+#endif
79143+EXPORT_SYMBOL(gr_learn_resource);
79144+EXPORT_SYMBOL(gr_set_kernel_label);
79145+#ifdef CONFIG_SECURITY
79146+EXPORT_SYMBOL(gr_check_user_change);
79147+EXPORT_SYMBOL(gr_check_group_change);
79148+#endif
79149+
79150diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
79151new file mode 100644
79152index 0000000..34fefda
79153--- /dev/null
79154+++ b/grsecurity/gracl_alloc.c
79155@@ -0,0 +1,105 @@
79156+#include <linux/kernel.h>
79157+#include <linux/mm.h>
79158+#include <linux/slab.h>
79159+#include <linux/vmalloc.h>
79160+#include <linux/gracl.h>
79161+#include <linux/grsecurity.h>
79162+
79163+static unsigned long alloc_stack_next = 1;
79164+static unsigned long alloc_stack_size = 1;
79165+static void **alloc_stack;
79166+
79167+static __inline__ int
79168+alloc_pop(void)
79169+{
79170+ if (alloc_stack_next == 1)
79171+ return 0;
79172+
79173+ kfree(alloc_stack[alloc_stack_next - 2]);
79174+
79175+ alloc_stack_next--;
79176+
79177+ return 1;
79178+}
79179+
79180+static __inline__ int
79181+alloc_push(void *buf)
79182+{
79183+ if (alloc_stack_next >= alloc_stack_size)
79184+ return 1;
79185+
79186+ alloc_stack[alloc_stack_next - 1] = buf;
79187+
79188+ alloc_stack_next++;
79189+
79190+ return 0;
79191+}
79192+
79193+void *
79194+acl_alloc(unsigned long len)
79195+{
79196+ void *ret = NULL;
79197+
79198+ if (!len || len > PAGE_SIZE)
79199+ goto out;
79200+
79201+ ret = kmalloc(len, GFP_KERNEL);
79202+
79203+ if (ret) {
79204+ if (alloc_push(ret)) {
79205+ kfree(ret);
79206+ ret = NULL;
79207+ }
79208+ }
79209+
79210+out:
79211+ return ret;
79212+}
79213+
79214+void *
79215+acl_alloc_num(unsigned long num, unsigned long len)
79216+{
79217+ if (!len || (num > (PAGE_SIZE / len)))
79218+ return NULL;
79219+
79220+ return acl_alloc(num * len);
79221+}
79222+
79223+void
79224+acl_free_all(void)
79225+{
79226+ if (gr_acl_is_enabled() || !alloc_stack)
79227+ return;
79228+
79229+ while (alloc_pop()) ;
79230+
79231+ if (alloc_stack) {
79232+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
79233+ kfree(alloc_stack);
79234+ else
79235+ vfree(alloc_stack);
79236+ }
79237+
79238+ alloc_stack = NULL;
79239+ alloc_stack_size = 1;
79240+ alloc_stack_next = 1;
79241+
79242+ return;
79243+}
79244+
79245+int
79246+acl_alloc_stack_init(unsigned long size)
79247+{
79248+ if ((size * sizeof (void *)) <= PAGE_SIZE)
79249+ alloc_stack =
79250+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
79251+ else
79252+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
79253+
79254+ alloc_stack_size = size;
79255+
79256+ if (!alloc_stack)
79257+ return 0;
79258+ else
79259+ return 1;
79260+}
79261diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
79262new file mode 100644
79263index 0000000..955ddfb
79264--- /dev/null
79265+++ b/grsecurity/gracl_cap.c
79266@@ -0,0 +1,101 @@
79267+#include <linux/kernel.h>
79268+#include <linux/module.h>
79269+#include <linux/sched.h>
79270+#include <linux/gracl.h>
79271+#include <linux/grsecurity.h>
79272+#include <linux/grinternal.h>
79273+
79274+extern const char *captab_log[];
79275+extern int captab_log_entries;
79276+
79277+int
79278+gr_acl_is_capable(const int cap)
79279+{
79280+ struct task_struct *task = current;
79281+ const struct cred *cred = current_cred();
79282+ struct acl_subject_label *curracl;
79283+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79284+ kernel_cap_t cap_audit = __cap_empty_set;
79285+
79286+ if (!gr_acl_is_enabled())
79287+ return 1;
79288+
79289+ curracl = task->acl;
79290+
79291+ cap_drop = curracl->cap_lower;
79292+ cap_mask = curracl->cap_mask;
79293+ cap_audit = curracl->cap_invert_audit;
79294+
79295+ while ((curracl = curracl->parent_subject)) {
79296+ /* if the cap isn't specified in the current computed mask but is specified in the
79297+ current level subject, and is lowered in the current level subject, then add
79298+ it to the set of dropped capabilities
79299+ otherwise, add the current level subject's mask to the current computed mask
79300+ */
79301+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79302+ cap_raise(cap_mask, cap);
79303+ if (cap_raised(curracl->cap_lower, cap))
79304+ cap_raise(cap_drop, cap);
79305+ if (cap_raised(curracl->cap_invert_audit, cap))
79306+ cap_raise(cap_audit, cap);
79307+ }
79308+ }
79309+
79310+ if (!cap_raised(cap_drop, cap)) {
79311+ if (cap_raised(cap_audit, cap))
79312+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
79313+ return 1;
79314+ }
79315+
79316+ curracl = task->acl;
79317+
79318+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
79319+ && cap_raised(cred->cap_effective, cap)) {
79320+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79321+ task->role->roletype, cred->uid,
79322+ cred->gid, task->exec_file ?
79323+ gr_to_filename(task->exec_file->f_path.dentry,
79324+ task->exec_file->f_path.mnt) : curracl->filename,
79325+ curracl->filename, 0UL,
79326+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
79327+ return 1;
79328+ }
79329+
79330+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
79331+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
79332+ return 0;
79333+}
79334+
79335+int
79336+gr_acl_is_capable_nolog(const int cap)
79337+{
79338+ struct acl_subject_label *curracl;
79339+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79340+
79341+ if (!gr_acl_is_enabled())
79342+ return 1;
79343+
79344+ curracl = current->acl;
79345+
79346+ cap_drop = curracl->cap_lower;
79347+ cap_mask = curracl->cap_mask;
79348+
79349+ while ((curracl = curracl->parent_subject)) {
79350+ /* if the cap isn't specified in the current computed mask but is specified in the
79351+ current level subject, and is lowered in the current level subject, then add
79352+ it to the set of dropped capabilities
79353+ otherwise, add the current level subject's mask to the current computed mask
79354+ */
79355+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79356+ cap_raise(cap_mask, cap);
79357+ if (cap_raised(curracl->cap_lower, cap))
79358+ cap_raise(cap_drop, cap);
79359+ }
79360+ }
79361+
79362+ if (!cap_raised(cap_drop, cap))
79363+ return 1;
79364+
79365+ return 0;
79366+}
79367+
79368diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
79369new file mode 100644
79370index 0000000..523e7e8
79371--- /dev/null
79372+++ b/grsecurity/gracl_fs.c
79373@@ -0,0 +1,435 @@
79374+#include <linux/kernel.h>
79375+#include <linux/sched.h>
79376+#include <linux/types.h>
79377+#include <linux/fs.h>
79378+#include <linux/file.h>
79379+#include <linux/stat.h>
79380+#include <linux/grsecurity.h>
79381+#include <linux/grinternal.h>
79382+#include <linux/gracl.h>
79383+
79384+umode_t
79385+gr_acl_umask(void)
79386+{
79387+ if (unlikely(!gr_acl_is_enabled()))
79388+ return 0;
79389+
79390+ return current->role->umask;
79391+}
79392+
79393+__u32
79394+gr_acl_handle_hidden_file(const struct dentry * dentry,
79395+ const struct vfsmount * mnt)
79396+{
79397+ __u32 mode;
79398+
79399+ if (unlikely(!dentry->d_inode))
79400+ return GR_FIND;
79401+
79402+ mode =
79403+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
79404+
79405+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
79406+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79407+ return mode;
79408+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
79409+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79410+ return 0;
79411+ } else if (unlikely(!(mode & GR_FIND)))
79412+ return 0;
79413+
79414+ return GR_FIND;
79415+}
79416+
79417+__u32
79418+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
79419+ int acc_mode)
79420+{
79421+ __u32 reqmode = GR_FIND;
79422+ __u32 mode;
79423+
79424+ if (unlikely(!dentry->d_inode))
79425+ return reqmode;
79426+
79427+ if (acc_mode & MAY_APPEND)
79428+ reqmode |= GR_APPEND;
79429+ else if (acc_mode & MAY_WRITE)
79430+ reqmode |= GR_WRITE;
79431+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
79432+ reqmode |= GR_READ;
79433+
79434+ mode =
79435+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79436+ mnt);
79437+
79438+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79439+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79440+ reqmode & GR_READ ? " reading" : "",
79441+ reqmode & GR_WRITE ? " writing" : reqmode &
79442+ GR_APPEND ? " appending" : "");
79443+ return reqmode;
79444+ } else
79445+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79446+ {
79447+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79448+ reqmode & GR_READ ? " reading" : "",
79449+ reqmode & GR_WRITE ? " writing" : reqmode &
79450+ GR_APPEND ? " appending" : "");
79451+ return 0;
79452+ } else if (unlikely((mode & reqmode) != reqmode))
79453+ return 0;
79454+
79455+ return reqmode;
79456+}
79457+
79458+__u32
79459+gr_acl_handle_creat(const struct dentry * dentry,
79460+ const struct dentry * p_dentry,
79461+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
79462+ const int imode)
79463+{
79464+ __u32 reqmode = GR_WRITE | GR_CREATE;
79465+ __u32 mode;
79466+
79467+ if (acc_mode & MAY_APPEND)
79468+ reqmode |= GR_APPEND;
79469+ // if a directory was required or the directory already exists, then
79470+ // don't count this open as a read
79471+ if ((acc_mode & MAY_READ) &&
79472+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
79473+ reqmode |= GR_READ;
79474+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
79475+ reqmode |= GR_SETID;
79476+
79477+ mode =
79478+ gr_check_create(dentry, p_dentry, p_mnt,
79479+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79480+
79481+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79482+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79483+ reqmode & GR_READ ? " reading" : "",
79484+ reqmode & GR_WRITE ? " writing" : reqmode &
79485+ GR_APPEND ? " appending" : "");
79486+ return reqmode;
79487+ } else
79488+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79489+ {
79490+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79491+ reqmode & GR_READ ? " reading" : "",
79492+ reqmode & GR_WRITE ? " writing" : reqmode &
79493+ GR_APPEND ? " appending" : "");
79494+ return 0;
79495+ } else if (unlikely((mode & reqmode) != reqmode))
79496+ return 0;
79497+
79498+ return reqmode;
79499+}
79500+
79501+__u32
79502+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
79503+ const int fmode)
79504+{
79505+ __u32 mode, reqmode = GR_FIND;
79506+
79507+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
79508+ reqmode |= GR_EXEC;
79509+ if (fmode & S_IWOTH)
79510+ reqmode |= GR_WRITE;
79511+ if (fmode & S_IROTH)
79512+ reqmode |= GR_READ;
79513+
79514+ mode =
79515+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79516+ mnt);
79517+
79518+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79519+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79520+ reqmode & GR_READ ? " reading" : "",
79521+ reqmode & GR_WRITE ? " writing" : "",
79522+ reqmode & GR_EXEC ? " executing" : "");
79523+ return reqmode;
79524+ } else
79525+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79526+ {
79527+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79528+ reqmode & GR_READ ? " reading" : "",
79529+ reqmode & GR_WRITE ? " writing" : "",
79530+ reqmode & GR_EXEC ? " executing" : "");
79531+ return 0;
79532+ } else if (unlikely((mode & reqmode) != reqmode))
79533+ return 0;
79534+
79535+ return reqmode;
79536+}
79537+
79538+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
79539+{
79540+ __u32 mode;
79541+
79542+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
79543+
79544+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79545+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
79546+ return mode;
79547+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79548+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
79549+ return 0;
79550+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79551+ return 0;
79552+
79553+ return (reqmode);
79554+}
79555+
79556+__u32
79557+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
79558+{
79559+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
79560+}
79561+
79562+__u32
79563+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
79564+{
79565+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
79566+}
79567+
79568+__u32
79569+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
79570+{
79571+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
79572+}
79573+
79574+__u32
79575+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
79576+{
79577+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
79578+}
79579+
79580+__u32
79581+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
79582+ umode_t *modeptr)
79583+{
79584+ mode_t mode;
79585+
79586+ *modeptr &= ~(mode_t)gr_acl_umask();
79587+ mode = *modeptr;
79588+
79589+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
79590+ return 1;
79591+
79592+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
79593+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
79594+ GR_CHMOD_ACL_MSG);
79595+ } else {
79596+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
79597+ }
79598+}
79599+
79600+__u32
79601+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
79602+{
79603+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
79604+}
79605+
79606+__u32
79607+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
79608+{
79609+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
79610+}
79611+
79612+__u32
79613+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
79614+{
79615+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
79616+}
79617+
79618+__u32
79619+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
79620+{
79621+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
79622+ GR_UNIXCONNECT_ACL_MSG);
79623+}
79624+
79625+/* hardlinks require at minimum create and link permission,
79626+ any additional privilege required is based on the
79627+ privilege of the file being linked to
79628+*/
79629+__u32
79630+gr_acl_handle_link(const struct dentry * new_dentry,
79631+ const struct dentry * parent_dentry,
79632+ const struct vfsmount * parent_mnt,
79633+ const struct dentry * old_dentry,
79634+ const struct vfsmount * old_mnt, const char *to)
79635+{
79636+ __u32 mode;
79637+ __u32 needmode = GR_CREATE | GR_LINK;
79638+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
79639+
79640+ mode =
79641+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
79642+ old_mnt);
79643+
79644+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
79645+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79646+ return mode;
79647+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79648+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79649+ return 0;
79650+ } else if (unlikely((mode & needmode) != needmode))
79651+ return 0;
79652+
79653+ return 1;
79654+}
79655+
79656+__u32
79657+gr_acl_handle_symlink(const struct dentry * new_dentry,
79658+ const struct dentry * parent_dentry,
79659+ const struct vfsmount * parent_mnt, const char *from)
79660+{
79661+ __u32 needmode = GR_WRITE | GR_CREATE;
79662+ __u32 mode;
79663+
79664+ mode =
79665+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
79666+ GR_CREATE | GR_AUDIT_CREATE |
79667+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
79668+
79669+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
79670+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79671+ return mode;
79672+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79673+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79674+ return 0;
79675+ } else if (unlikely((mode & needmode) != needmode))
79676+ return 0;
79677+
79678+ return (GR_WRITE | GR_CREATE);
79679+}
79680+
79681+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
79682+{
79683+ __u32 mode;
79684+
79685+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79686+
79687+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79688+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
79689+ return mode;
79690+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79691+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
79692+ return 0;
79693+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79694+ return 0;
79695+
79696+ return (reqmode);
79697+}
79698+
79699+__u32
79700+gr_acl_handle_mknod(const struct dentry * new_dentry,
79701+ const struct dentry * parent_dentry,
79702+ const struct vfsmount * parent_mnt,
79703+ const int mode)
79704+{
79705+ __u32 reqmode = GR_WRITE | GR_CREATE;
79706+ if (unlikely(mode & (S_ISUID | S_ISGID)))
79707+ reqmode |= GR_SETID;
79708+
79709+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79710+ reqmode, GR_MKNOD_ACL_MSG);
79711+}
79712+
79713+__u32
79714+gr_acl_handle_mkdir(const struct dentry *new_dentry,
79715+ const struct dentry *parent_dentry,
79716+ const struct vfsmount *parent_mnt)
79717+{
79718+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79719+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
79720+}
79721+
79722+#define RENAME_CHECK_SUCCESS(old, new) \
79723+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
79724+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
79725+
79726+int
79727+gr_acl_handle_rename(struct dentry *new_dentry,
79728+ struct dentry *parent_dentry,
79729+ const struct vfsmount *parent_mnt,
79730+ struct dentry *old_dentry,
79731+ struct inode *old_parent_inode,
79732+ struct vfsmount *old_mnt, const char *newname)
79733+{
79734+ __u32 comp1, comp2;
79735+ int error = 0;
79736+
79737+ if (unlikely(!gr_acl_is_enabled()))
79738+ return 0;
79739+
79740+ if (!new_dentry->d_inode) {
79741+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
79742+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
79743+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
79744+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
79745+ GR_DELETE | GR_AUDIT_DELETE |
79746+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79747+ GR_SUPPRESS, old_mnt);
79748+ } else {
79749+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
79750+ GR_CREATE | GR_DELETE |
79751+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
79752+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79753+ GR_SUPPRESS, parent_mnt);
79754+ comp2 =
79755+ gr_search_file(old_dentry,
79756+ GR_READ | GR_WRITE | GR_AUDIT_READ |
79757+ GR_DELETE | GR_AUDIT_DELETE |
79758+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
79759+ }
79760+
79761+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
79762+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
79763+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79764+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
79765+ && !(comp2 & GR_SUPPRESS)) {
79766+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79767+ error = -EACCES;
79768+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
79769+ error = -EACCES;
79770+
79771+ return error;
79772+}
79773+
79774+void
79775+gr_acl_handle_exit(void)
79776+{
79777+ u16 id;
79778+ char *rolename;
79779+ struct file *exec_file;
79780+
79781+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
79782+ !(current->role->roletype & GR_ROLE_PERSIST))) {
79783+ id = current->acl_role_id;
79784+ rolename = current->role->rolename;
79785+ gr_set_acls(1);
79786+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
79787+ }
79788+
79789+ write_lock(&grsec_exec_file_lock);
79790+ exec_file = current->exec_file;
79791+ current->exec_file = NULL;
79792+ write_unlock(&grsec_exec_file_lock);
79793+
79794+ if (exec_file)
79795+ fput(exec_file);
79796+}
79797+
79798+int
79799+gr_acl_handle_procpidmem(const struct task_struct *task)
79800+{
79801+ if (unlikely(!gr_acl_is_enabled()))
79802+ return 0;
79803+
79804+ if (task != current && task->acl->mode & GR_PROTPROCFD)
79805+ return -EACCES;
79806+
79807+ return 0;
79808+}
79809diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
79810new file mode 100644
79811index 0000000..cd07b96
79812--- /dev/null
79813+++ b/grsecurity/gracl_ip.c
79814@@ -0,0 +1,382 @@
79815+#include <linux/kernel.h>
79816+#include <asm/uaccess.h>
79817+#include <asm/errno.h>
79818+#include <net/sock.h>
79819+#include <linux/file.h>
79820+#include <linux/fs.h>
79821+#include <linux/net.h>
79822+#include <linux/in.h>
79823+#include <linux/skbuff.h>
79824+#include <linux/ip.h>
79825+#include <linux/udp.h>
79826+#include <linux/smp_lock.h>
79827+#include <linux/types.h>
79828+#include <linux/sched.h>
79829+#include <linux/netdevice.h>
79830+#include <linux/inetdevice.h>
79831+#include <linux/gracl.h>
79832+#include <linux/grsecurity.h>
79833+#include <linux/grinternal.h>
79834+
79835+#define GR_BIND 0x01
79836+#define GR_CONNECT 0x02
79837+#define GR_INVERT 0x04
79838+#define GR_BINDOVERRIDE 0x08
79839+#define GR_CONNECTOVERRIDE 0x10
79840+#define GR_SOCK_FAMILY 0x20
79841+
79842+static const char * gr_protocols[IPPROTO_MAX] = {
79843+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
79844+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
79845+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
79846+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
79847+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
79848+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
79849+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
79850+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
79851+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
79852+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
79853+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
79854+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
79855+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
79856+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
79857+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
79858+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
79859+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
79860+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
79861+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
79862+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
79863+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
79864+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
79865+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
79866+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
79867+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
79868+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
79869+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
79870+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
79871+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
79872+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
79873+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
79874+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
79875+ };
79876+
79877+static const char * gr_socktypes[SOCK_MAX] = {
79878+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
79879+ "unknown:7", "unknown:8", "unknown:9", "packet"
79880+ };
79881+
79882+static const char * gr_sockfamilies[AF_MAX+1] = {
79883+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
79884+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
79885+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
79886+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
79887+ };
79888+
79889+const char *
79890+gr_proto_to_name(unsigned char proto)
79891+{
79892+ return gr_protocols[proto];
79893+}
79894+
79895+const char *
79896+gr_socktype_to_name(unsigned char type)
79897+{
79898+ return gr_socktypes[type];
79899+}
79900+
79901+const char *
79902+gr_sockfamily_to_name(unsigned char family)
79903+{
79904+ return gr_sockfamilies[family];
79905+}
79906+
79907+int
79908+gr_search_socket(const int domain, const int type, const int protocol)
79909+{
79910+ struct acl_subject_label *curr;
79911+ const struct cred *cred = current_cred();
79912+
79913+ if (unlikely(!gr_acl_is_enabled()))
79914+ goto exit;
79915+
79916+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
79917+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
79918+ goto exit; // let the kernel handle it
79919+
79920+ curr = current->acl;
79921+
79922+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
79923+ /* the family is allowed, if this is PF_INET allow it only if
79924+ the extra sock type/protocol checks pass */
79925+ if (domain == PF_INET)
79926+ goto inet_check;
79927+ goto exit;
79928+ } else {
79929+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79930+ __u32 fakeip = 0;
79931+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79932+ current->role->roletype, cred->uid,
79933+ cred->gid, current->exec_file ?
79934+ gr_to_filename(current->exec_file->f_path.dentry,
79935+ current->exec_file->f_path.mnt) :
79936+ curr->filename, curr->filename,
79937+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
79938+ &current->signal->saved_ip);
79939+ goto exit;
79940+ }
79941+ goto exit_fail;
79942+ }
79943+
79944+inet_check:
79945+ /* the rest of this checking is for IPv4 only */
79946+ if (!curr->ips)
79947+ goto exit;
79948+
79949+ if ((curr->ip_type & (1 << type)) &&
79950+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
79951+ goto exit;
79952+
79953+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79954+ /* we don't place acls on raw sockets , and sometimes
79955+ dgram/ip sockets are opened for ioctl and not
79956+ bind/connect, so we'll fake a bind learn log */
79957+ if (type == SOCK_RAW || type == SOCK_PACKET) {
79958+ __u32 fakeip = 0;
79959+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79960+ current->role->roletype, cred->uid,
79961+ cred->gid, current->exec_file ?
79962+ gr_to_filename(current->exec_file->f_path.dentry,
79963+ current->exec_file->f_path.mnt) :
79964+ curr->filename, curr->filename,
79965+ &fakeip, 0, type,
79966+ protocol, GR_CONNECT, &current->signal->saved_ip);
79967+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
79968+ __u32 fakeip = 0;
79969+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79970+ current->role->roletype, cred->uid,
79971+ cred->gid, current->exec_file ?
79972+ gr_to_filename(current->exec_file->f_path.dentry,
79973+ current->exec_file->f_path.mnt) :
79974+ curr->filename, curr->filename,
79975+ &fakeip, 0, type,
79976+ protocol, GR_BIND, &current->signal->saved_ip);
79977+ }
79978+ /* we'll log when they use connect or bind */
79979+ goto exit;
79980+ }
79981+
79982+exit_fail:
79983+ if (domain == PF_INET)
79984+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
79985+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
79986+ else
79987+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
79988+ gr_socktype_to_name(type), protocol);
79989+
79990+ return 0;
79991+exit:
79992+ return 1;
79993+}
79994+
79995+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
79996+{
79997+ if ((ip->mode & mode) &&
79998+ (ip_port >= ip->low) &&
79999+ (ip_port <= ip->high) &&
80000+ ((ntohl(ip_addr) & our_netmask) ==
80001+ (ntohl(our_addr) & our_netmask))
80002+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
80003+ && (ip->type & (1 << type))) {
80004+ if (ip->mode & GR_INVERT)
80005+ return 2; // specifically denied
80006+ else
80007+ return 1; // allowed
80008+ }
80009+
80010+ return 0; // not specifically allowed, may continue parsing
80011+}
80012+
80013+static int
80014+gr_search_connectbind(const int full_mode, struct sock *sk,
80015+ struct sockaddr_in *addr, const int type)
80016+{
80017+ char iface[IFNAMSIZ] = {0};
80018+ struct acl_subject_label *curr;
80019+ struct acl_ip_label *ip;
80020+ struct inet_sock *isk;
80021+ struct net_device *dev;
80022+ struct in_device *idev;
80023+ unsigned long i;
80024+ int ret;
80025+ int mode = full_mode & (GR_BIND | GR_CONNECT);
80026+ __u32 ip_addr = 0;
80027+ __u32 our_addr;
80028+ __u32 our_netmask;
80029+ char *p;
80030+ __u16 ip_port = 0;
80031+ const struct cred *cred = current_cred();
80032+
80033+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
80034+ return 0;
80035+
80036+ curr = current->acl;
80037+ isk = inet_sk(sk);
80038+
80039+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
80040+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
80041+ addr->sin_addr.s_addr = curr->inaddr_any_override;
80042+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
80043+ struct sockaddr_in saddr;
80044+ int err;
80045+
80046+ saddr.sin_family = AF_INET;
80047+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
80048+ saddr.sin_port = isk->sport;
80049+
80050+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
80051+ if (err)
80052+ return err;
80053+
80054+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
80055+ if (err)
80056+ return err;
80057+ }
80058+
80059+ if (!curr->ips)
80060+ return 0;
80061+
80062+ ip_addr = addr->sin_addr.s_addr;
80063+ ip_port = ntohs(addr->sin_port);
80064+
80065+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
80066+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
80067+ current->role->roletype, cred->uid,
80068+ cred->gid, current->exec_file ?
80069+ gr_to_filename(current->exec_file->f_path.dentry,
80070+ current->exec_file->f_path.mnt) :
80071+ curr->filename, curr->filename,
80072+ &ip_addr, ip_port, type,
80073+ sk->sk_protocol, mode, &current->signal->saved_ip);
80074+ return 0;
80075+ }
80076+
80077+ for (i = 0; i < curr->ip_num; i++) {
80078+ ip = *(curr->ips + i);
80079+ if (ip->iface != NULL) {
80080+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
80081+ p = strchr(iface, ':');
80082+ if (p != NULL)
80083+ *p = '\0';
80084+ dev = dev_get_by_name(sock_net(sk), iface);
80085+ if (dev == NULL)
80086+ continue;
80087+ idev = in_dev_get(dev);
80088+ if (idev == NULL) {
80089+ dev_put(dev);
80090+ continue;
80091+ }
80092+ rcu_read_lock();
80093+ for_ifa(idev) {
80094+ if (!strcmp(ip->iface, ifa->ifa_label)) {
80095+ our_addr = ifa->ifa_address;
80096+ our_netmask = 0xffffffff;
80097+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
80098+ if (ret == 1) {
80099+ rcu_read_unlock();
80100+ in_dev_put(idev);
80101+ dev_put(dev);
80102+ return 0;
80103+ } else if (ret == 2) {
80104+ rcu_read_unlock();
80105+ in_dev_put(idev);
80106+ dev_put(dev);
80107+ goto denied;
80108+ }
80109+ }
80110+ } endfor_ifa(idev);
80111+ rcu_read_unlock();
80112+ in_dev_put(idev);
80113+ dev_put(dev);
80114+ } else {
80115+ our_addr = ip->addr;
80116+ our_netmask = ip->netmask;
80117+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
80118+ if (ret == 1)
80119+ return 0;
80120+ else if (ret == 2)
80121+ goto denied;
80122+ }
80123+ }
80124+
80125+denied:
80126+ if (mode == GR_BIND)
80127+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
80128+ else if (mode == GR_CONNECT)
80129+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
80130+
80131+ return -EACCES;
80132+}
80133+
80134+int
80135+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
80136+{
80137+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
80138+}
80139+
80140+int
80141+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
80142+{
80143+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
80144+}
80145+
80146+int gr_search_listen(struct socket *sock)
80147+{
80148+ struct sock *sk = sock->sk;
80149+ struct sockaddr_in addr;
80150+
80151+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80152+ addr.sin_port = inet_sk(sk)->sport;
80153+
80154+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80155+}
80156+
80157+int gr_search_accept(struct socket *sock)
80158+{
80159+ struct sock *sk = sock->sk;
80160+ struct sockaddr_in addr;
80161+
80162+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80163+ addr.sin_port = inet_sk(sk)->sport;
80164+
80165+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80166+}
80167+
80168+int
80169+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
80170+{
80171+ if (addr)
80172+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
80173+ else {
80174+ struct sockaddr_in sin;
80175+ const struct inet_sock *inet = inet_sk(sk);
80176+
80177+ sin.sin_addr.s_addr = inet->daddr;
80178+ sin.sin_port = inet->dport;
80179+
80180+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80181+ }
80182+}
80183+
80184+int
80185+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
80186+{
80187+ struct sockaddr_in sin;
80188+
80189+ if (unlikely(skb->len < sizeof (struct udphdr)))
80190+ return 0; // skip this packet
80191+
80192+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
80193+ sin.sin_port = udp_hdr(skb)->source;
80194+
80195+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80196+}
80197diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
80198new file mode 100644
80199index 0000000..34bdd46
80200--- /dev/null
80201+++ b/grsecurity/gracl_learn.c
80202@@ -0,0 +1,208 @@
80203+#include <linux/kernel.h>
80204+#include <linux/mm.h>
80205+#include <linux/sched.h>
80206+#include <linux/poll.h>
80207+#include <linux/smp_lock.h>
80208+#include <linux/string.h>
80209+#include <linux/file.h>
80210+#include <linux/types.h>
80211+#include <linux/vmalloc.h>
80212+#include <linux/grinternal.h>
80213+
80214+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
80215+ size_t count, loff_t *ppos);
80216+extern int gr_acl_is_enabled(void);
80217+
80218+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
80219+static int gr_learn_attached;
80220+
80221+/* use a 512k buffer */
80222+#define LEARN_BUFFER_SIZE (512 * 1024)
80223+
80224+static DEFINE_SPINLOCK(gr_learn_lock);
80225+static DEFINE_MUTEX(gr_learn_user_mutex);
80226+
80227+/* we need to maintain two buffers, so that the kernel context of grlearn
80228+ uses a semaphore around the userspace copying, and the other kernel contexts
80229+ use a spinlock when copying into the buffer, since they cannot sleep
80230+*/
80231+static char *learn_buffer;
80232+static char *learn_buffer_user;
80233+static int learn_buffer_len;
80234+static int learn_buffer_user_len;
80235+
80236+static ssize_t
80237+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
80238+{
80239+ DECLARE_WAITQUEUE(wait, current);
80240+ ssize_t retval = 0;
80241+
80242+ add_wait_queue(&learn_wait, &wait);
80243+ set_current_state(TASK_INTERRUPTIBLE);
80244+ do {
80245+ mutex_lock(&gr_learn_user_mutex);
80246+ spin_lock(&gr_learn_lock);
80247+ if (learn_buffer_len)
80248+ break;
80249+ spin_unlock(&gr_learn_lock);
80250+ mutex_unlock(&gr_learn_user_mutex);
80251+ if (file->f_flags & O_NONBLOCK) {
80252+ retval = -EAGAIN;
80253+ goto out;
80254+ }
80255+ if (signal_pending(current)) {
80256+ retval = -ERESTARTSYS;
80257+ goto out;
80258+ }
80259+
80260+ schedule();
80261+ } while (1);
80262+
80263+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
80264+ learn_buffer_user_len = learn_buffer_len;
80265+ retval = learn_buffer_len;
80266+ learn_buffer_len = 0;
80267+
80268+ spin_unlock(&gr_learn_lock);
80269+
80270+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
80271+ retval = -EFAULT;
80272+
80273+ mutex_unlock(&gr_learn_user_mutex);
80274+out:
80275+ set_current_state(TASK_RUNNING);
80276+ remove_wait_queue(&learn_wait, &wait);
80277+ return retval;
80278+}
80279+
80280+static unsigned int
80281+poll_learn(struct file * file, poll_table * wait)
80282+{
80283+ poll_wait(file, &learn_wait, wait);
80284+
80285+ if (learn_buffer_len)
80286+ return (POLLIN | POLLRDNORM);
80287+
80288+ return 0;
80289+}
80290+
80291+void
80292+gr_clear_learn_entries(void)
80293+{
80294+ char *tmp;
80295+
80296+ mutex_lock(&gr_learn_user_mutex);
80297+ spin_lock(&gr_learn_lock);
80298+ tmp = learn_buffer;
80299+ learn_buffer = NULL;
80300+ spin_unlock(&gr_learn_lock);
80301+ if (tmp)
80302+ vfree(tmp);
80303+ if (learn_buffer_user != NULL) {
80304+ vfree(learn_buffer_user);
80305+ learn_buffer_user = NULL;
80306+ }
80307+ learn_buffer_len = 0;
80308+ mutex_unlock(&gr_learn_user_mutex);
80309+
80310+ return;
80311+}
80312+
80313+void
80314+gr_add_learn_entry(const char *fmt, ...)
80315+{
80316+ va_list args;
80317+ unsigned int len;
80318+
80319+ if (!gr_learn_attached)
80320+ return;
80321+
80322+ spin_lock(&gr_learn_lock);
80323+
80324+ /* leave a gap at the end so we know when it's "full" but don't have to
80325+ compute the exact length of the string we're trying to append
80326+ */
80327+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
80328+ spin_unlock(&gr_learn_lock);
80329+ wake_up_interruptible(&learn_wait);
80330+ return;
80331+ }
80332+ if (learn_buffer == NULL) {
80333+ spin_unlock(&gr_learn_lock);
80334+ return;
80335+ }
80336+
80337+ va_start(args, fmt);
80338+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
80339+ va_end(args);
80340+
80341+ learn_buffer_len += len + 1;
80342+
80343+ spin_unlock(&gr_learn_lock);
80344+ wake_up_interruptible(&learn_wait);
80345+
80346+ return;
80347+}
80348+
80349+static int
80350+open_learn(struct inode *inode, struct file *file)
80351+{
80352+ if (file->f_mode & FMODE_READ && gr_learn_attached)
80353+ return -EBUSY;
80354+ if (file->f_mode & FMODE_READ) {
80355+ int retval = 0;
80356+ mutex_lock(&gr_learn_user_mutex);
80357+ if (learn_buffer == NULL)
80358+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
80359+ if (learn_buffer_user == NULL)
80360+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
80361+ if (learn_buffer == NULL) {
80362+ retval = -ENOMEM;
80363+ goto out_error;
80364+ }
80365+ if (learn_buffer_user == NULL) {
80366+ retval = -ENOMEM;
80367+ goto out_error;
80368+ }
80369+ learn_buffer_len = 0;
80370+ learn_buffer_user_len = 0;
80371+ gr_learn_attached = 1;
80372+out_error:
80373+ mutex_unlock(&gr_learn_user_mutex);
80374+ return retval;
80375+ }
80376+ return 0;
80377+}
80378+
80379+static int
80380+close_learn(struct inode *inode, struct file *file)
80381+{
80382+ if (file->f_mode & FMODE_READ) {
80383+ char *tmp = NULL;
80384+ mutex_lock(&gr_learn_user_mutex);
80385+ spin_lock(&gr_learn_lock);
80386+ tmp = learn_buffer;
80387+ learn_buffer = NULL;
80388+ spin_unlock(&gr_learn_lock);
80389+ if (tmp)
80390+ vfree(tmp);
80391+ if (learn_buffer_user != NULL) {
80392+ vfree(learn_buffer_user);
80393+ learn_buffer_user = NULL;
80394+ }
80395+ learn_buffer_len = 0;
80396+ learn_buffer_user_len = 0;
80397+ gr_learn_attached = 0;
80398+ mutex_unlock(&gr_learn_user_mutex);
80399+ }
80400+
80401+ return 0;
80402+}
80403+
80404+const struct file_operations grsec_fops = {
80405+ .read = read_learn,
80406+ .write = write_grsec_handler,
80407+ .open = open_learn,
80408+ .release = close_learn,
80409+ .poll = poll_learn,
80410+};
80411diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
80412new file mode 100644
80413index 0000000..70b2179
80414--- /dev/null
80415+++ b/grsecurity/gracl_res.c
80416@@ -0,0 +1,67 @@
80417+#include <linux/kernel.h>
80418+#include <linux/sched.h>
80419+#include <linux/gracl.h>
80420+#include <linux/grinternal.h>
80421+
80422+static const char *restab_log[] = {
80423+ [RLIMIT_CPU] = "RLIMIT_CPU",
80424+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
80425+ [RLIMIT_DATA] = "RLIMIT_DATA",
80426+ [RLIMIT_STACK] = "RLIMIT_STACK",
80427+ [RLIMIT_CORE] = "RLIMIT_CORE",
80428+ [RLIMIT_RSS] = "RLIMIT_RSS",
80429+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
80430+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
80431+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
80432+ [RLIMIT_AS] = "RLIMIT_AS",
80433+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
80434+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
80435+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
80436+ [RLIMIT_NICE] = "RLIMIT_NICE",
80437+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
80438+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
80439+ [GR_CRASH_RES] = "RLIMIT_CRASH"
80440+};
80441+
80442+void
80443+gr_log_resource(const struct task_struct *task,
80444+ const int res, const unsigned long wanted, const int gt)
80445+{
80446+ const struct cred *cred;
80447+ unsigned long rlim;
80448+
80449+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
80450+ return;
80451+
80452+ // not yet supported resource
80453+ if (unlikely(!restab_log[res]))
80454+ return;
80455+
80456+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
80457+ rlim = task->signal->rlim[res].rlim_max;
80458+ else
80459+ rlim = task->signal->rlim[res].rlim_cur;
80460+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
80461+ return;
80462+
80463+ rcu_read_lock();
80464+ cred = __task_cred(task);
80465+
80466+ if (res == RLIMIT_NPROC &&
80467+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
80468+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
80469+ goto out_rcu_unlock;
80470+ else if (res == RLIMIT_MEMLOCK &&
80471+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
80472+ goto out_rcu_unlock;
80473+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
80474+ goto out_rcu_unlock;
80475+ rcu_read_unlock();
80476+
80477+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
80478+
80479+ return;
80480+out_rcu_unlock:
80481+ rcu_read_unlock();
80482+ return;
80483+}
80484diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
80485new file mode 100644
80486index 0000000..1d1b734
80487--- /dev/null
80488+++ b/grsecurity/gracl_segv.c
80489@@ -0,0 +1,284 @@
80490+#include <linux/kernel.h>
80491+#include <linux/mm.h>
80492+#include <asm/uaccess.h>
80493+#include <asm/errno.h>
80494+#include <asm/mman.h>
80495+#include <net/sock.h>
80496+#include <linux/file.h>
80497+#include <linux/fs.h>
80498+#include <linux/net.h>
80499+#include <linux/in.h>
80500+#include <linux/smp_lock.h>
80501+#include <linux/slab.h>
80502+#include <linux/types.h>
80503+#include <linux/sched.h>
80504+#include <linux/timer.h>
80505+#include <linux/gracl.h>
80506+#include <linux/grsecurity.h>
80507+#include <linux/grinternal.h>
80508+
80509+static struct crash_uid *uid_set;
80510+static unsigned short uid_used;
80511+static DEFINE_SPINLOCK(gr_uid_lock);
80512+extern rwlock_t gr_inode_lock;
80513+extern struct acl_subject_label *
80514+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
80515+ struct acl_role_label *role);
80516+extern int gr_fake_force_sig(int sig, struct task_struct *t);
80517+
80518+int
80519+gr_init_uidset(void)
80520+{
80521+ uid_set =
80522+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
80523+ uid_used = 0;
80524+
80525+ return uid_set ? 1 : 0;
80526+}
80527+
80528+void
80529+gr_free_uidset(void)
80530+{
80531+ if (uid_set)
80532+ kfree(uid_set);
80533+
80534+ return;
80535+}
80536+
80537+int
80538+gr_find_uid(const uid_t uid)
80539+{
80540+ struct crash_uid *tmp = uid_set;
80541+ uid_t buid;
80542+ int low = 0, high = uid_used - 1, mid;
80543+
80544+ while (high >= low) {
80545+ mid = (low + high) >> 1;
80546+ buid = tmp[mid].uid;
80547+ if (buid == uid)
80548+ return mid;
80549+ if (buid > uid)
80550+ high = mid - 1;
80551+ if (buid < uid)
80552+ low = mid + 1;
80553+ }
80554+
80555+ return -1;
80556+}
80557+
80558+static __inline__ void
80559+gr_insertsort(void)
80560+{
80561+ unsigned short i, j;
80562+ struct crash_uid index;
80563+
80564+ for (i = 1; i < uid_used; i++) {
80565+ index = uid_set[i];
80566+ j = i;
80567+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
80568+ uid_set[j] = uid_set[j - 1];
80569+ j--;
80570+ }
80571+ uid_set[j] = index;
80572+ }
80573+
80574+ return;
80575+}
80576+
80577+static __inline__ void
80578+gr_insert_uid(const uid_t uid, const unsigned long expires)
80579+{
80580+ int loc;
80581+
80582+ if (uid_used == GR_UIDTABLE_MAX)
80583+ return;
80584+
80585+ loc = gr_find_uid(uid);
80586+
80587+ if (loc >= 0) {
80588+ uid_set[loc].expires = expires;
80589+ return;
80590+ }
80591+
80592+ uid_set[uid_used].uid = uid;
80593+ uid_set[uid_used].expires = expires;
80594+ uid_used++;
80595+
80596+ gr_insertsort();
80597+
80598+ return;
80599+}
80600+
80601+void
80602+gr_remove_uid(const unsigned short loc)
80603+{
80604+ unsigned short i;
80605+
80606+ for (i = loc + 1; i < uid_used; i++)
80607+ uid_set[i - 1] = uid_set[i];
80608+
80609+ uid_used--;
80610+
80611+ return;
80612+}
80613+
80614+int
80615+gr_check_crash_uid(const uid_t uid)
80616+{
80617+ int loc;
80618+ int ret = 0;
80619+
80620+ if (unlikely(!gr_acl_is_enabled()))
80621+ return 0;
80622+
80623+ spin_lock(&gr_uid_lock);
80624+ loc = gr_find_uid(uid);
80625+
80626+ if (loc < 0)
80627+ goto out_unlock;
80628+
80629+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
80630+ gr_remove_uid(loc);
80631+ else
80632+ ret = 1;
80633+
80634+out_unlock:
80635+ spin_unlock(&gr_uid_lock);
80636+ return ret;
80637+}
80638+
80639+static __inline__ int
80640+proc_is_setxid(const struct cred *cred)
80641+{
80642+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
80643+ cred->uid != cred->fsuid)
80644+ return 1;
80645+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
80646+ cred->gid != cred->fsgid)
80647+ return 1;
80648+
80649+ return 0;
80650+}
80651+
80652+void
80653+gr_handle_crash(struct task_struct *task, const int sig)
80654+{
80655+ struct acl_subject_label *curr;
80656+ struct task_struct *tsk, *tsk2;
80657+ const struct cred *cred;
80658+ const struct cred *cred2;
80659+
80660+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
80661+ return;
80662+
80663+ if (unlikely(!gr_acl_is_enabled()))
80664+ return;
80665+
80666+ curr = task->acl;
80667+
80668+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
80669+ return;
80670+
80671+ if (time_before_eq(curr->expires, get_seconds())) {
80672+ curr->expires = 0;
80673+ curr->crashes = 0;
80674+ }
80675+
80676+ curr->crashes++;
80677+
80678+ if (!curr->expires)
80679+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
80680+
80681+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80682+ time_after(curr->expires, get_seconds())) {
80683+ rcu_read_lock();
80684+ cred = __task_cred(task);
80685+ if (cred->uid && proc_is_setxid(cred)) {
80686+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80687+ spin_lock(&gr_uid_lock);
80688+ gr_insert_uid(cred->uid, curr->expires);
80689+ spin_unlock(&gr_uid_lock);
80690+ curr->expires = 0;
80691+ curr->crashes = 0;
80692+ read_lock(&tasklist_lock);
80693+ do_each_thread(tsk2, tsk) {
80694+ cred2 = __task_cred(tsk);
80695+ if (tsk != task && cred2->uid == cred->uid)
80696+ gr_fake_force_sig(SIGKILL, tsk);
80697+ } while_each_thread(tsk2, tsk);
80698+ read_unlock(&tasklist_lock);
80699+ } else {
80700+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80701+ read_lock(&tasklist_lock);
80702+ read_lock(&grsec_exec_file_lock);
80703+ do_each_thread(tsk2, tsk) {
80704+ if (likely(tsk != task)) {
80705+ // if this thread has the same subject as the one that triggered
80706+ // RES_CRASH and it's the same binary, kill it
80707+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
80708+ gr_fake_force_sig(SIGKILL, tsk);
80709+ }
80710+ } while_each_thread(tsk2, tsk);
80711+ read_unlock(&grsec_exec_file_lock);
80712+ read_unlock(&tasklist_lock);
80713+ }
80714+ rcu_read_unlock();
80715+ }
80716+
80717+ return;
80718+}
80719+
80720+int
80721+gr_check_crash_exec(const struct file *filp)
80722+{
80723+ struct acl_subject_label *curr;
80724+
80725+ if (unlikely(!gr_acl_is_enabled()))
80726+ return 0;
80727+
80728+ read_lock(&gr_inode_lock);
80729+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
80730+ filp->f_path.dentry->d_inode->i_sb->s_dev,
80731+ current->role);
80732+ read_unlock(&gr_inode_lock);
80733+
80734+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
80735+ (!curr->crashes && !curr->expires))
80736+ return 0;
80737+
80738+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80739+ time_after(curr->expires, get_seconds()))
80740+ return 1;
80741+ else if (time_before_eq(curr->expires, get_seconds())) {
80742+ curr->crashes = 0;
80743+ curr->expires = 0;
80744+ }
80745+
80746+ return 0;
80747+}
80748+
80749+void
80750+gr_handle_alertkill(struct task_struct *task)
80751+{
80752+ struct acl_subject_label *curracl;
80753+ __u32 curr_ip;
80754+ struct task_struct *p, *p2;
80755+
80756+ if (unlikely(!gr_acl_is_enabled()))
80757+ return;
80758+
80759+ curracl = task->acl;
80760+ curr_ip = task->signal->curr_ip;
80761+
80762+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
80763+ read_lock(&tasklist_lock);
80764+ do_each_thread(p2, p) {
80765+ if (p->signal->curr_ip == curr_ip)
80766+ gr_fake_force_sig(SIGKILL, p);
80767+ } while_each_thread(p2, p);
80768+ read_unlock(&tasklist_lock);
80769+ } else if (curracl->mode & GR_KILLPROC)
80770+ gr_fake_force_sig(SIGKILL, task);
80771+
80772+ return;
80773+}
80774diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
80775new file mode 100644
80776index 0000000..9d83a69
80777--- /dev/null
80778+++ b/grsecurity/gracl_shm.c
80779@@ -0,0 +1,40 @@
80780+#include <linux/kernel.h>
80781+#include <linux/mm.h>
80782+#include <linux/sched.h>
80783+#include <linux/file.h>
80784+#include <linux/ipc.h>
80785+#include <linux/gracl.h>
80786+#include <linux/grsecurity.h>
80787+#include <linux/grinternal.h>
80788+
80789+int
80790+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80791+ const time_t shm_createtime, const uid_t cuid, const int shmid)
80792+{
80793+ struct task_struct *task;
80794+
80795+ if (!gr_acl_is_enabled())
80796+ return 1;
80797+
80798+ rcu_read_lock();
80799+ read_lock(&tasklist_lock);
80800+
80801+ task = find_task_by_vpid(shm_cprid);
80802+
80803+ if (unlikely(!task))
80804+ task = find_task_by_vpid(shm_lapid);
80805+
80806+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
80807+ (task->pid == shm_lapid)) &&
80808+ (task->acl->mode & GR_PROTSHM) &&
80809+ (task->acl != current->acl))) {
80810+ read_unlock(&tasklist_lock);
80811+ rcu_read_unlock();
80812+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
80813+ return 0;
80814+ }
80815+ read_unlock(&tasklist_lock);
80816+ rcu_read_unlock();
80817+
80818+ return 1;
80819+}
80820diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
80821new file mode 100644
80822index 0000000..bc0be01
80823--- /dev/null
80824+++ b/grsecurity/grsec_chdir.c
80825@@ -0,0 +1,19 @@
80826+#include <linux/kernel.h>
80827+#include <linux/sched.h>
80828+#include <linux/fs.h>
80829+#include <linux/file.h>
80830+#include <linux/grsecurity.h>
80831+#include <linux/grinternal.h>
80832+
80833+void
80834+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
80835+{
80836+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80837+ if ((grsec_enable_chdir && grsec_enable_group &&
80838+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
80839+ !grsec_enable_group)) {
80840+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
80841+ }
80842+#endif
80843+ return;
80844+}
80845diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
80846new file mode 100644
80847index 0000000..197bdd5
80848--- /dev/null
80849+++ b/grsecurity/grsec_chroot.c
80850@@ -0,0 +1,386 @@
80851+#include <linux/kernel.h>
80852+#include <linux/module.h>
80853+#include <linux/sched.h>
80854+#include <linux/file.h>
80855+#include <linux/fs.h>
80856+#include <linux/mount.h>
80857+#include <linux/types.h>
80858+#include <linux/pid_namespace.h>
80859+#include <linux/grsecurity.h>
80860+#include <linux/grinternal.h>
80861+
80862+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
80863+{
80864+#ifdef CONFIG_GRKERNSEC
80865+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
80866+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
80867+ task->gr_is_chrooted = 1;
80868+ else
80869+ task->gr_is_chrooted = 0;
80870+
80871+ task->gr_chroot_dentry = path->dentry;
80872+#endif
80873+ return;
80874+}
80875+
80876+void gr_clear_chroot_entries(struct task_struct *task)
80877+{
80878+#ifdef CONFIG_GRKERNSEC
80879+ task->gr_is_chrooted = 0;
80880+ task->gr_chroot_dentry = NULL;
80881+#endif
80882+ return;
80883+}
80884+
80885+int
80886+gr_handle_chroot_unix(const pid_t pid)
80887+{
80888+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80889+ struct task_struct *p;
80890+
80891+ if (unlikely(!grsec_enable_chroot_unix))
80892+ return 1;
80893+
80894+ if (likely(!proc_is_chrooted(current)))
80895+ return 1;
80896+
80897+ rcu_read_lock();
80898+ read_lock(&tasklist_lock);
80899+
80900+ p = find_task_by_vpid_unrestricted(pid);
80901+ if (unlikely(p && !have_same_root(current, p))) {
80902+ read_unlock(&tasklist_lock);
80903+ rcu_read_unlock();
80904+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
80905+ return 0;
80906+ }
80907+ read_unlock(&tasklist_lock);
80908+ rcu_read_unlock();
80909+#endif
80910+ return 1;
80911+}
80912+
80913+int
80914+gr_handle_chroot_nice(void)
80915+{
80916+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80917+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
80918+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
80919+ return -EPERM;
80920+ }
80921+#endif
80922+ return 0;
80923+}
80924+
80925+int
80926+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
80927+{
80928+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80929+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
80930+ && proc_is_chrooted(current)) {
80931+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
80932+ return -EACCES;
80933+ }
80934+#endif
80935+ return 0;
80936+}
80937+
80938+int
80939+gr_handle_chroot_rawio(const struct inode *inode)
80940+{
80941+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80942+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
80943+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
80944+ return 1;
80945+#endif
80946+ return 0;
80947+}
80948+
80949+int
80950+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
80951+{
80952+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80953+ struct task_struct *p;
80954+ int ret = 0;
80955+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
80956+ return ret;
80957+
80958+ read_lock(&tasklist_lock);
80959+ do_each_pid_task(pid, type, p) {
80960+ if (!have_same_root(current, p)) {
80961+ ret = 1;
80962+ goto out;
80963+ }
80964+ } while_each_pid_task(pid, type, p);
80965+out:
80966+ read_unlock(&tasklist_lock);
80967+ return ret;
80968+#endif
80969+ return 0;
80970+}
80971+
80972+int
80973+gr_pid_is_chrooted(struct task_struct *p)
80974+{
80975+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80976+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
80977+ return 0;
80978+
80979+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
80980+ !have_same_root(current, p)) {
80981+ return 1;
80982+ }
80983+#endif
80984+ return 0;
80985+}
80986+
80987+EXPORT_SYMBOL(gr_pid_is_chrooted);
80988+
80989+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
80990+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
80991+{
80992+ struct dentry *dentry = (struct dentry *)u_dentry;
80993+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
80994+ struct dentry *realroot;
80995+ struct vfsmount *realrootmnt;
80996+ struct dentry *currentroot;
80997+ struct vfsmount *currentmnt;
80998+ struct task_struct *reaper = &init_task;
80999+ int ret = 1;
81000+
81001+ read_lock(&reaper->fs->lock);
81002+ realrootmnt = mntget(reaper->fs->root.mnt);
81003+ realroot = dget(reaper->fs->root.dentry);
81004+ read_unlock(&reaper->fs->lock);
81005+
81006+ read_lock(&current->fs->lock);
81007+ currentmnt = mntget(current->fs->root.mnt);
81008+ currentroot = dget(current->fs->root.dentry);
81009+ read_unlock(&current->fs->lock);
81010+
81011+ spin_lock(&dcache_lock);
81012+ for (;;) {
81013+ if (unlikely((dentry == realroot && mnt == realrootmnt)
81014+ || (dentry == currentroot && mnt == currentmnt)))
81015+ break;
81016+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
81017+ if (mnt->mnt_parent == mnt)
81018+ break;
81019+ dentry = mnt->mnt_mountpoint;
81020+ mnt = mnt->mnt_parent;
81021+ continue;
81022+ }
81023+ dentry = dentry->d_parent;
81024+ }
81025+ spin_unlock(&dcache_lock);
81026+
81027+ dput(currentroot);
81028+ mntput(currentmnt);
81029+
81030+ /* access is outside of chroot */
81031+ if (dentry == realroot && mnt == realrootmnt)
81032+ ret = 0;
81033+
81034+ dput(realroot);
81035+ mntput(realrootmnt);
81036+ return ret;
81037+}
81038+#endif
81039+
81040+int
81041+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
81042+{
81043+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
81044+ if (!grsec_enable_chroot_fchdir)
81045+ return 1;
81046+
81047+ if (!proc_is_chrooted(current))
81048+ return 1;
81049+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
81050+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
81051+ return 0;
81052+ }
81053+#endif
81054+ return 1;
81055+}
81056+
81057+int
81058+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81059+ const time_t shm_createtime)
81060+{
81061+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
81062+ struct task_struct *p;
81063+ time_t starttime;
81064+
81065+ if (unlikely(!grsec_enable_chroot_shmat))
81066+ return 1;
81067+
81068+ if (likely(!proc_is_chrooted(current)))
81069+ return 1;
81070+
81071+ rcu_read_lock();
81072+ read_lock(&tasklist_lock);
81073+
81074+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
81075+ starttime = p->start_time.tv_sec;
81076+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
81077+ if (have_same_root(current, p)) {
81078+ goto allow;
81079+ } else {
81080+ read_unlock(&tasklist_lock);
81081+ rcu_read_unlock();
81082+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
81083+ return 0;
81084+ }
81085+ }
81086+ /* creator exited, pid reuse, fall through to next check */
81087+ }
81088+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
81089+ if (unlikely(!have_same_root(current, p))) {
81090+ read_unlock(&tasklist_lock);
81091+ rcu_read_unlock();
81092+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
81093+ return 0;
81094+ }
81095+ }
81096+
81097+allow:
81098+ read_unlock(&tasklist_lock);
81099+ rcu_read_unlock();
81100+#endif
81101+ return 1;
81102+}
81103+
81104+void
81105+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
81106+{
81107+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
81108+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
81109+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
81110+#endif
81111+ return;
81112+}
81113+
81114+int
81115+gr_handle_chroot_mknod(const struct dentry *dentry,
81116+ const struct vfsmount *mnt, const int mode)
81117+{
81118+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
81119+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
81120+ proc_is_chrooted(current)) {
81121+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
81122+ return -EPERM;
81123+ }
81124+#endif
81125+ return 0;
81126+}
81127+
81128+int
81129+gr_handle_chroot_mount(const struct dentry *dentry,
81130+ const struct vfsmount *mnt, const char *dev_name)
81131+{
81132+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
81133+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
81134+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
81135+ return -EPERM;
81136+ }
81137+#endif
81138+ return 0;
81139+}
81140+
81141+int
81142+gr_handle_chroot_pivot(void)
81143+{
81144+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
81145+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
81146+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
81147+ return -EPERM;
81148+ }
81149+#endif
81150+ return 0;
81151+}
81152+
81153+int
81154+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
81155+{
81156+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
81157+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
81158+ !gr_is_outside_chroot(dentry, mnt)) {
81159+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
81160+ return -EPERM;
81161+ }
81162+#endif
81163+ return 0;
81164+}
81165+
81166+extern const char *captab_log[];
81167+extern int captab_log_entries;
81168+
81169+int
81170+gr_chroot_is_capable(const int cap)
81171+{
81172+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81173+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81174+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81175+ if (cap_raised(chroot_caps, cap)) {
81176+ const struct cred *creds = current_cred();
81177+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
81178+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
81179+ }
81180+ return 0;
81181+ }
81182+ }
81183+#endif
81184+ return 1;
81185+}
81186+
81187+int
81188+gr_chroot_is_capable_nolog(const int cap)
81189+{
81190+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81191+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81192+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81193+ if (cap_raised(chroot_caps, cap)) {
81194+ return 0;
81195+ }
81196+ }
81197+#endif
81198+ return 1;
81199+}
81200+
81201+int
81202+gr_handle_chroot_sysctl(const int op)
81203+{
81204+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
81205+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
81206+ && (op & MAY_WRITE))
81207+ return -EACCES;
81208+#endif
81209+ return 0;
81210+}
81211+
81212+void
81213+gr_handle_chroot_chdir(struct path *path)
81214+{
81215+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
81216+ if (grsec_enable_chroot_chdir)
81217+ set_fs_pwd(current->fs, path);
81218+#endif
81219+ return;
81220+}
81221+
81222+int
81223+gr_handle_chroot_chmod(const struct dentry *dentry,
81224+ const struct vfsmount *mnt, const int mode)
81225+{
81226+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
81227+ /* allow chmod +s on directories, but not on files */
81228+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
81229+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
81230+ proc_is_chrooted(current)) {
81231+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
81232+ return -EPERM;
81233+ }
81234+#endif
81235+ return 0;
81236+}
81237diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
81238new file mode 100644
81239index 0000000..40545bf
81240--- /dev/null
81241+++ b/grsecurity/grsec_disabled.c
81242@@ -0,0 +1,437 @@
81243+#include <linux/kernel.h>
81244+#include <linux/module.h>
81245+#include <linux/sched.h>
81246+#include <linux/file.h>
81247+#include <linux/fs.h>
81248+#include <linux/kdev_t.h>
81249+#include <linux/net.h>
81250+#include <linux/in.h>
81251+#include <linux/ip.h>
81252+#include <linux/skbuff.h>
81253+#include <linux/sysctl.h>
81254+
81255+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
81256+void
81257+pax_set_initial_flags(struct linux_binprm *bprm)
81258+{
81259+ return;
81260+}
81261+#endif
81262+
81263+#ifdef CONFIG_SYSCTL
81264+__u32
81265+gr_handle_sysctl(const struct ctl_table * table, const int op)
81266+{
81267+ return 0;
81268+}
81269+#endif
81270+
81271+#ifdef CONFIG_TASKSTATS
81272+int gr_is_taskstats_denied(int pid)
81273+{
81274+ return 0;
81275+}
81276+#endif
81277+
81278+int
81279+gr_acl_is_enabled(void)
81280+{
81281+ return 0;
81282+}
81283+
81284+void
81285+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
81286+{
81287+ return;
81288+}
81289+
81290+int
81291+gr_handle_rawio(const struct inode *inode)
81292+{
81293+ return 0;
81294+}
81295+
81296+void
81297+gr_acl_handle_psacct(struct task_struct *task, const long code)
81298+{
81299+ return;
81300+}
81301+
81302+int
81303+gr_handle_ptrace(struct task_struct *task, const long request)
81304+{
81305+ return 0;
81306+}
81307+
81308+int
81309+gr_handle_proc_ptrace(struct task_struct *task)
81310+{
81311+ return 0;
81312+}
81313+
81314+void
81315+gr_learn_resource(const struct task_struct *task,
81316+ const int res, const unsigned long wanted, const int gt)
81317+{
81318+ return;
81319+}
81320+
81321+int
81322+gr_set_acls(const int type)
81323+{
81324+ return 0;
81325+}
81326+
81327+int
81328+gr_check_hidden_task(const struct task_struct *tsk)
81329+{
81330+ return 0;
81331+}
81332+
81333+int
81334+gr_check_protected_task(const struct task_struct *task)
81335+{
81336+ return 0;
81337+}
81338+
81339+int
81340+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
81341+{
81342+ return 0;
81343+}
81344+
81345+void
81346+gr_copy_label(struct task_struct *tsk)
81347+{
81348+ return;
81349+}
81350+
81351+void
81352+gr_set_pax_flags(struct task_struct *task)
81353+{
81354+ return;
81355+}
81356+
81357+int
81358+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
81359+ const int unsafe_share)
81360+{
81361+ return 0;
81362+}
81363+
81364+void
81365+gr_handle_delete(const ino_t ino, const dev_t dev)
81366+{
81367+ return;
81368+}
81369+
81370+void
81371+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
81372+{
81373+ return;
81374+}
81375+
81376+void
81377+gr_handle_crash(struct task_struct *task, const int sig)
81378+{
81379+ return;
81380+}
81381+
81382+int
81383+gr_check_crash_exec(const struct file *filp)
81384+{
81385+ return 0;
81386+}
81387+
81388+int
81389+gr_check_crash_uid(const uid_t uid)
81390+{
81391+ return 0;
81392+}
81393+
81394+void
81395+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
81396+ struct dentry *old_dentry,
81397+ struct dentry *new_dentry,
81398+ struct vfsmount *mnt, const __u8 replace)
81399+{
81400+ return;
81401+}
81402+
81403+int
81404+gr_search_socket(const int family, const int type, const int protocol)
81405+{
81406+ return 1;
81407+}
81408+
81409+int
81410+gr_search_connectbind(const int mode, const struct socket *sock,
81411+ const struct sockaddr_in *addr)
81412+{
81413+ return 0;
81414+}
81415+
81416+void
81417+gr_handle_alertkill(struct task_struct *task)
81418+{
81419+ return;
81420+}
81421+
81422+__u32
81423+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
81424+{
81425+ return 1;
81426+}
81427+
81428+__u32
81429+gr_acl_handle_hidden_file(const struct dentry * dentry,
81430+ const struct vfsmount * mnt)
81431+{
81432+ return 1;
81433+}
81434+
81435+__u32
81436+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
81437+ int acc_mode)
81438+{
81439+ return 1;
81440+}
81441+
81442+__u32
81443+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
81444+{
81445+ return 1;
81446+}
81447+
81448+__u32
81449+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
81450+{
81451+ return 1;
81452+}
81453+
81454+int
81455+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
81456+ unsigned int *vm_flags)
81457+{
81458+ return 1;
81459+}
81460+
81461+__u32
81462+gr_acl_handle_truncate(const struct dentry * dentry,
81463+ const struct vfsmount * mnt)
81464+{
81465+ return 1;
81466+}
81467+
81468+__u32
81469+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
81470+{
81471+ return 1;
81472+}
81473+
81474+__u32
81475+gr_acl_handle_access(const struct dentry * dentry,
81476+ const struct vfsmount * mnt, const int fmode)
81477+{
81478+ return 1;
81479+}
81480+
81481+__u32
81482+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
81483+ umode_t *mode)
81484+{
81485+ return 1;
81486+}
81487+
81488+__u32
81489+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
81490+{
81491+ return 1;
81492+}
81493+
81494+__u32
81495+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
81496+{
81497+ return 1;
81498+}
81499+
81500+void
81501+grsecurity_init(void)
81502+{
81503+ return;
81504+}
81505+
81506+umode_t gr_acl_umask(void)
81507+{
81508+ return 0;
81509+}
81510+
81511+__u32
81512+gr_acl_handle_mknod(const struct dentry * new_dentry,
81513+ const struct dentry * parent_dentry,
81514+ const struct vfsmount * parent_mnt,
81515+ const int mode)
81516+{
81517+ return 1;
81518+}
81519+
81520+__u32
81521+gr_acl_handle_mkdir(const struct dentry * new_dentry,
81522+ const struct dentry * parent_dentry,
81523+ const struct vfsmount * parent_mnt)
81524+{
81525+ return 1;
81526+}
81527+
81528+__u32
81529+gr_acl_handle_symlink(const struct dentry * new_dentry,
81530+ const struct dentry * parent_dentry,
81531+ const struct vfsmount * parent_mnt, const char *from)
81532+{
81533+ return 1;
81534+}
81535+
81536+__u32
81537+gr_acl_handle_link(const struct dentry * new_dentry,
81538+ const struct dentry * parent_dentry,
81539+ const struct vfsmount * parent_mnt,
81540+ const struct dentry * old_dentry,
81541+ const struct vfsmount * old_mnt, const char *to)
81542+{
81543+ return 1;
81544+}
81545+
81546+int
81547+gr_acl_handle_rename(const struct dentry *new_dentry,
81548+ const struct dentry *parent_dentry,
81549+ const struct vfsmount *parent_mnt,
81550+ const struct dentry *old_dentry,
81551+ const struct inode *old_parent_inode,
81552+ const struct vfsmount *old_mnt, const char *newname)
81553+{
81554+ return 0;
81555+}
81556+
81557+int
81558+gr_acl_handle_filldir(const struct file *file, const char *name,
81559+ const int namelen, const ino_t ino)
81560+{
81561+ return 1;
81562+}
81563+
81564+int
81565+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81566+ const time_t shm_createtime, const uid_t cuid, const int shmid)
81567+{
81568+ return 1;
81569+}
81570+
81571+int
81572+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
81573+{
81574+ return 0;
81575+}
81576+
81577+int
81578+gr_search_accept(const struct socket *sock)
81579+{
81580+ return 0;
81581+}
81582+
81583+int
81584+gr_search_listen(const struct socket *sock)
81585+{
81586+ return 0;
81587+}
81588+
81589+int
81590+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
81591+{
81592+ return 0;
81593+}
81594+
81595+__u32
81596+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
81597+{
81598+ return 1;
81599+}
81600+
81601+__u32
81602+gr_acl_handle_creat(const struct dentry * dentry,
81603+ const struct dentry * p_dentry,
81604+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
81605+ const int imode)
81606+{
81607+ return 1;
81608+}
81609+
81610+void
81611+gr_acl_handle_exit(void)
81612+{
81613+ return;
81614+}
81615+
81616+int
81617+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
81618+{
81619+ return 1;
81620+}
81621+
81622+void
81623+gr_set_role_label(const uid_t uid, const gid_t gid)
81624+{
81625+ return;
81626+}
81627+
81628+int
81629+gr_acl_handle_procpidmem(const struct task_struct *task)
81630+{
81631+ return 0;
81632+}
81633+
81634+int
81635+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
81636+{
81637+ return 0;
81638+}
81639+
81640+int
81641+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
81642+{
81643+ return 0;
81644+}
81645+
81646+void
81647+gr_set_kernel_label(struct task_struct *task)
81648+{
81649+ return;
81650+}
81651+
81652+int
81653+gr_check_user_change(int real, int effective, int fs)
81654+{
81655+ return 0;
81656+}
81657+
81658+int
81659+gr_check_group_change(int real, int effective, int fs)
81660+{
81661+ return 0;
81662+}
81663+
81664+int gr_acl_enable_at_secure(void)
81665+{
81666+ return 0;
81667+}
81668+
81669+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
81670+{
81671+ return dentry->d_inode->i_sb->s_dev;
81672+}
81673+
81674+EXPORT_SYMBOL(gr_learn_resource);
81675+EXPORT_SYMBOL(gr_set_kernel_label);
81676+#ifdef CONFIG_SECURITY
81677+EXPORT_SYMBOL(gr_check_user_change);
81678+EXPORT_SYMBOL(gr_check_group_change);
81679+#endif
81680diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
81681new file mode 100644
81682index 0000000..a96e155
81683--- /dev/null
81684+++ b/grsecurity/grsec_exec.c
81685@@ -0,0 +1,204 @@
81686+#include <linux/kernel.h>
81687+#include <linux/sched.h>
81688+#include <linux/file.h>
81689+#include <linux/binfmts.h>
81690+#include <linux/smp_lock.h>
81691+#include <linux/fs.h>
81692+#include <linux/types.h>
81693+#include <linux/grdefs.h>
81694+#include <linux/grinternal.h>
81695+#include <linux/capability.h>
81696+#include <linux/compat.h>
81697+#include <linux/module.h>
81698+
81699+#include <asm/uaccess.h>
81700+
81701+#ifdef CONFIG_GRKERNSEC_EXECLOG
81702+static char gr_exec_arg_buf[132];
81703+static DEFINE_MUTEX(gr_exec_arg_mutex);
81704+#endif
81705+
81706+void
81707+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
81708+{
81709+#ifdef CONFIG_GRKERNSEC_EXECLOG
81710+ char *grarg = gr_exec_arg_buf;
81711+ unsigned int i, x, execlen = 0;
81712+ char c;
81713+
81714+ if (!((grsec_enable_execlog && grsec_enable_group &&
81715+ in_group_p(grsec_audit_gid))
81716+ || (grsec_enable_execlog && !grsec_enable_group)))
81717+ return;
81718+
81719+ mutex_lock(&gr_exec_arg_mutex);
81720+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81721+
81722+ if (unlikely(argv == NULL))
81723+ goto log;
81724+
81725+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81726+ const char __user *p;
81727+ unsigned int len;
81728+
81729+ if (copy_from_user(&p, argv + i, sizeof(p)))
81730+ goto log;
81731+ if (!p)
81732+ goto log;
81733+ len = strnlen_user(p, 128 - execlen);
81734+ if (len > 128 - execlen)
81735+ len = 128 - execlen;
81736+ else if (len > 0)
81737+ len--;
81738+ if (copy_from_user(grarg + execlen, p, len))
81739+ goto log;
81740+
81741+ /* rewrite unprintable characters */
81742+ for (x = 0; x < len; x++) {
81743+ c = *(grarg + execlen + x);
81744+ if (c < 32 || c > 126)
81745+ *(grarg + execlen + x) = ' ';
81746+ }
81747+
81748+ execlen += len;
81749+ *(grarg + execlen) = ' ';
81750+ *(grarg + execlen + 1) = '\0';
81751+ execlen++;
81752+ }
81753+
81754+ log:
81755+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81756+ bprm->file->f_path.mnt, grarg);
81757+ mutex_unlock(&gr_exec_arg_mutex);
81758+#endif
81759+ return;
81760+}
81761+
81762+#ifdef CONFIG_COMPAT
81763+void
81764+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
81765+{
81766+#ifdef CONFIG_GRKERNSEC_EXECLOG
81767+ char *grarg = gr_exec_arg_buf;
81768+ unsigned int i, x, execlen = 0;
81769+ char c;
81770+
81771+ if (!((grsec_enable_execlog && grsec_enable_group &&
81772+ in_group_p(grsec_audit_gid))
81773+ || (grsec_enable_execlog && !grsec_enable_group)))
81774+ return;
81775+
81776+ mutex_lock(&gr_exec_arg_mutex);
81777+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81778+
81779+ if (unlikely(argv == NULL))
81780+ goto log;
81781+
81782+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81783+ compat_uptr_t p;
81784+ unsigned int len;
81785+
81786+ if (get_user(p, argv + i))
81787+ goto log;
81788+ len = strnlen_user(compat_ptr(p), 128 - execlen);
81789+ if (len > 128 - execlen)
81790+ len = 128 - execlen;
81791+ else if (len > 0)
81792+ len--;
81793+ else
81794+ goto log;
81795+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
81796+ goto log;
81797+
81798+ /* rewrite unprintable characters */
81799+ for (x = 0; x < len; x++) {
81800+ c = *(grarg + execlen + x);
81801+ if (c < 32 || c > 126)
81802+ *(grarg + execlen + x) = ' ';
81803+ }
81804+
81805+ execlen += len;
81806+ *(grarg + execlen) = ' ';
81807+ *(grarg + execlen + 1) = '\0';
81808+ execlen++;
81809+ }
81810+
81811+ log:
81812+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81813+ bprm->file->f_path.mnt, grarg);
81814+ mutex_unlock(&gr_exec_arg_mutex);
81815+#endif
81816+ return;
81817+}
81818+#endif
81819+
81820+#ifdef CONFIG_GRKERNSEC
81821+extern int gr_acl_is_capable(const int cap);
81822+extern int gr_acl_is_capable_nolog(const int cap);
81823+extern int gr_chroot_is_capable(const int cap);
81824+extern int gr_chroot_is_capable_nolog(const int cap);
81825+#endif
81826+
81827+const char *captab_log[] = {
81828+ "CAP_CHOWN",
81829+ "CAP_DAC_OVERRIDE",
81830+ "CAP_DAC_READ_SEARCH",
81831+ "CAP_FOWNER",
81832+ "CAP_FSETID",
81833+ "CAP_KILL",
81834+ "CAP_SETGID",
81835+ "CAP_SETUID",
81836+ "CAP_SETPCAP",
81837+ "CAP_LINUX_IMMUTABLE",
81838+ "CAP_NET_BIND_SERVICE",
81839+ "CAP_NET_BROADCAST",
81840+ "CAP_NET_ADMIN",
81841+ "CAP_NET_RAW",
81842+ "CAP_IPC_LOCK",
81843+ "CAP_IPC_OWNER",
81844+ "CAP_SYS_MODULE",
81845+ "CAP_SYS_RAWIO",
81846+ "CAP_SYS_CHROOT",
81847+ "CAP_SYS_PTRACE",
81848+ "CAP_SYS_PACCT",
81849+ "CAP_SYS_ADMIN",
81850+ "CAP_SYS_BOOT",
81851+ "CAP_SYS_NICE",
81852+ "CAP_SYS_RESOURCE",
81853+ "CAP_SYS_TIME",
81854+ "CAP_SYS_TTY_CONFIG",
81855+ "CAP_MKNOD",
81856+ "CAP_LEASE",
81857+ "CAP_AUDIT_WRITE",
81858+ "CAP_AUDIT_CONTROL",
81859+ "CAP_SETFCAP",
81860+ "CAP_MAC_OVERRIDE",
81861+ "CAP_MAC_ADMIN"
81862+};
81863+
81864+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
81865+
81866+int gr_is_capable(const int cap)
81867+{
81868+#ifdef CONFIG_GRKERNSEC
81869+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
81870+ return 1;
81871+ return 0;
81872+#else
81873+ return 1;
81874+#endif
81875+}
81876+
81877+int gr_is_capable_nolog(const int cap)
81878+{
81879+#ifdef CONFIG_GRKERNSEC
81880+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
81881+ return 1;
81882+ return 0;
81883+#else
81884+ return 1;
81885+#endif
81886+}
81887+
81888+EXPORT_SYMBOL(gr_is_capable);
81889+EXPORT_SYMBOL(gr_is_capable_nolog);
81890diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
81891new file mode 100644
81892index 0000000..d3ee748
81893--- /dev/null
81894+++ b/grsecurity/grsec_fifo.c
81895@@ -0,0 +1,24 @@
81896+#include <linux/kernel.h>
81897+#include <linux/sched.h>
81898+#include <linux/fs.h>
81899+#include <linux/file.h>
81900+#include <linux/grinternal.h>
81901+
81902+int
81903+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
81904+ const struct dentry *dir, const int flag, const int acc_mode)
81905+{
81906+#ifdef CONFIG_GRKERNSEC_FIFO
81907+ const struct cred *cred = current_cred();
81908+
81909+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
81910+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
81911+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
81912+ (cred->fsuid != dentry->d_inode->i_uid)) {
81913+ if (!inode_permission(dentry->d_inode, acc_mode))
81914+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
81915+ return -EACCES;
81916+ }
81917+#endif
81918+ return 0;
81919+}
81920diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
81921new file mode 100644
81922index 0000000..8ca18bf
81923--- /dev/null
81924+++ b/grsecurity/grsec_fork.c
81925@@ -0,0 +1,23 @@
81926+#include <linux/kernel.h>
81927+#include <linux/sched.h>
81928+#include <linux/grsecurity.h>
81929+#include <linux/grinternal.h>
81930+#include <linux/errno.h>
81931+
81932+void
81933+gr_log_forkfail(const int retval)
81934+{
81935+#ifdef CONFIG_GRKERNSEC_FORKFAIL
81936+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
81937+ switch (retval) {
81938+ case -EAGAIN:
81939+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
81940+ break;
81941+ case -ENOMEM:
81942+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
81943+ break;
81944+ }
81945+ }
81946+#endif
81947+ return;
81948+}
81949diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
81950new file mode 100644
81951index 0000000..1e995d3
81952--- /dev/null
81953+++ b/grsecurity/grsec_init.c
81954@@ -0,0 +1,278 @@
81955+#include <linux/kernel.h>
81956+#include <linux/sched.h>
81957+#include <linux/mm.h>
81958+#include <linux/smp_lock.h>
81959+#include <linux/gracl.h>
81960+#include <linux/slab.h>
81961+#include <linux/vmalloc.h>
81962+#include <linux/percpu.h>
81963+#include <linux/module.h>
81964+
81965+int grsec_enable_ptrace_readexec;
81966+int grsec_enable_setxid;
81967+int grsec_enable_brute;
81968+int grsec_enable_link;
81969+int grsec_enable_dmesg;
81970+int grsec_enable_harden_ptrace;
81971+int grsec_enable_fifo;
81972+int grsec_enable_execlog;
81973+int grsec_enable_signal;
81974+int grsec_enable_forkfail;
81975+int grsec_enable_audit_ptrace;
81976+int grsec_enable_time;
81977+int grsec_enable_audit_textrel;
81978+int grsec_enable_group;
81979+int grsec_audit_gid;
81980+int grsec_enable_chdir;
81981+int grsec_enable_mount;
81982+int grsec_enable_rofs;
81983+int grsec_enable_chroot_findtask;
81984+int grsec_enable_chroot_mount;
81985+int grsec_enable_chroot_shmat;
81986+int grsec_enable_chroot_fchdir;
81987+int grsec_enable_chroot_double;
81988+int grsec_enable_chroot_pivot;
81989+int grsec_enable_chroot_chdir;
81990+int grsec_enable_chroot_chmod;
81991+int grsec_enable_chroot_mknod;
81992+int grsec_enable_chroot_nice;
81993+int grsec_enable_chroot_execlog;
81994+int grsec_enable_chroot_caps;
81995+int grsec_enable_chroot_sysctl;
81996+int grsec_enable_chroot_unix;
81997+int grsec_enable_tpe;
81998+int grsec_tpe_gid;
81999+int grsec_enable_blackhole;
82000+#ifdef CONFIG_IPV6_MODULE
82001+EXPORT_SYMBOL(grsec_enable_blackhole);
82002+#endif
82003+int grsec_lastack_retries;
82004+int grsec_enable_tpe_all;
82005+int grsec_enable_tpe_invert;
82006+int grsec_enable_socket_all;
82007+int grsec_socket_all_gid;
82008+int grsec_enable_socket_client;
82009+int grsec_socket_client_gid;
82010+int grsec_enable_socket_server;
82011+int grsec_socket_server_gid;
82012+int grsec_resource_logging;
82013+int grsec_disable_privio;
82014+int grsec_enable_log_rwxmaps;
82015+int grsec_lock;
82016+
82017+DEFINE_SPINLOCK(grsec_alert_lock);
82018+unsigned long grsec_alert_wtime = 0;
82019+unsigned long grsec_alert_fyet = 0;
82020+
82021+DEFINE_SPINLOCK(grsec_audit_lock);
82022+
82023+DEFINE_RWLOCK(grsec_exec_file_lock);
82024+
82025+char *gr_shared_page[4];
82026+
82027+char *gr_alert_log_fmt;
82028+char *gr_audit_log_fmt;
82029+char *gr_alert_log_buf;
82030+char *gr_audit_log_buf;
82031+
82032+extern struct gr_arg *gr_usermode;
82033+extern unsigned char *gr_system_salt;
82034+extern unsigned char *gr_system_sum;
82035+
82036+void __init
82037+grsecurity_init(void)
82038+{
82039+ int j;
82040+ /* create the per-cpu shared pages */
82041+
82042+#ifdef CONFIG_X86
82043+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
82044+#endif
82045+
82046+ for (j = 0; j < 4; j++) {
82047+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
82048+ if (gr_shared_page[j] == NULL) {
82049+ panic("Unable to allocate grsecurity shared page");
82050+ return;
82051+ }
82052+ }
82053+
82054+ /* allocate log buffers */
82055+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
82056+ if (!gr_alert_log_fmt) {
82057+ panic("Unable to allocate grsecurity alert log format buffer");
82058+ return;
82059+ }
82060+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
82061+ if (!gr_audit_log_fmt) {
82062+ panic("Unable to allocate grsecurity audit log format buffer");
82063+ return;
82064+ }
82065+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
82066+ if (!gr_alert_log_buf) {
82067+ panic("Unable to allocate grsecurity alert log buffer");
82068+ return;
82069+ }
82070+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
82071+ if (!gr_audit_log_buf) {
82072+ panic("Unable to allocate grsecurity audit log buffer");
82073+ return;
82074+ }
82075+
82076+ /* allocate memory for authentication structure */
82077+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
82078+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
82079+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
82080+
82081+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
82082+ panic("Unable to allocate grsecurity authentication structure");
82083+ return;
82084+ }
82085+
82086+
82087+#ifdef CONFIG_GRKERNSEC_IO
82088+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
82089+ grsec_disable_privio = 1;
82090+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
82091+ grsec_disable_privio = 1;
82092+#else
82093+ grsec_disable_privio = 0;
82094+#endif
82095+#endif
82096+
82097+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
82098+ /* for backward compatibility, tpe_invert always defaults to on if
82099+ enabled in the kernel
82100+ */
82101+ grsec_enable_tpe_invert = 1;
82102+#endif
82103+
82104+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
82105+#ifndef CONFIG_GRKERNSEC_SYSCTL
82106+ grsec_lock = 1;
82107+#endif
82108+
82109+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82110+ grsec_enable_audit_textrel = 1;
82111+#endif
82112+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82113+ grsec_enable_log_rwxmaps = 1;
82114+#endif
82115+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
82116+ grsec_enable_group = 1;
82117+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
82118+#endif
82119+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
82120+ grsec_enable_chdir = 1;
82121+#endif
82122+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
82123+ grsec_enable_harden_ptrace = 1;
82124+#endif
82125+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82126+ grsec_enable_mount = 1;
82127+#endif
82128+#ifdef CONFIG_GRKERNSEC_LINK
82129+ grsec_enable_link = 1;
82130+#endif
82131+#ifdef CONFIG_GRKERNSEC_BRUTE
82132+ grsec_enable_brute = 1;
82133+#endif
82134+#ifdef CONFIG_GRKERNSEC_DMESG
82135+ grsec_enable_dmesg = 1;
82136+#endif
82137+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82138+ grsec_enable_blackhole = 1;
82139+ grsec_lastack_retries = 4;
82140+#endif
82141+#ifdef CONFIG_GRKERNSEC_FIFO
82142+ grsec_enable_fifo = 1;
82143+#endif
82144+#ifdef CONFIG_GRKERNSEC_EXECLOG
82145+ grsec_enable_execlog = 1;
82146+#endif
82147+#ifdef CONFIG_GRKERNSEC_SETXID
82148+ grsec_enable_setxid = 1;
82149+#endif
82150+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82151+ grsec_enable_ptrace_readexec = 1;
82152+#endif
82153+#ifdef CONFIG_GRKERNSEC_SIGNAL
82154+ grsec_enable_signal = 1;
82155+#endif
82156+#ifdef CONFIG_GRKERNSEC_FORKFAIL
82157+ grsec_enable_forkfail = 1;
82158+#endif
82159+#ifdef CONFIG_GRKERNSEC_TIME
82160+ grsec_enable_time = 1;
82161+#endif
82162+#ifdef CONFIG_GRKERNSEC_RESLOG
82163+ grsec_resource_logging = 1;
82164+#endif
82165+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82166+ grsec_enable_chroot_findtask = 1;
82167+#endif
82168+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
82169+ grsec_enable_chroot_unix = 1;
82170+#endif
82171+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
82172+ grsec_enable_chroot_mount = 1;
82173+#endif
82174+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
82175+ grsec_enable_chroot_fchdir = 1;
82176+#endif
82177+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
82178+ grsec_enable_chroot_shmat = 1;
82179+#endif
82180+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82181+ grsec_enable_audit_ptrace = 1;
82182+#endif
82183+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
82184+ grsec_enable_chroot_double = 1;
82185+#endif
82186+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
82187+ grsec_enable_chroot_pivot = 1;
82188+#endif
82189+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
82190+ grsec_enable_chroot_chdir = 1;
82191+#endif
82192+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
82193+ grsec_enable_chroot_chmod = 1;
82194+#endif
82195+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
82196+ grsec_enable_chroot_mknod = 1;
82197+#endif
82198+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
82199+ grsec_enable_chroot_nice = 1;
82200+#endif
82201+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
82202+ grsec_enable_chroot_execlog = 1;
82203+#endif
82204+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
82205+ grsec_enable_chroot_caps = 1;
82206+#endif
82207+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
82208+ grsec_enable_chroot_sysctl = 1;
82209+#endif
82210+#ifdef CONFIG_GRKERNSEC_TPE
82211+ grsec_enable_tpe = 1;
82212+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
82213+#ifdef CONFIG_GRKERNSEC_TPE_ALL
82214+ grsec_enable_tpe_all = 1;
82215+#endif
82216+#endif
82217+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
82218+ grsec_enable_socket_all = 1;
82219+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
82220+#endif
82221+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
82222+ grsec_enable_socket_client = 1;
82223+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
82224+#endif
82225+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
82226+ grsec_enable_socket_server = 1;
82227+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
82228+#endif
82229+#endif
82230+
82231+ return;
82232+}
82233diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
82234new file mode 100644
82235index 0000000..3efe141
82236--- /dev/null
82237+++ b/grsecurity/grsec_link.c
82238@@ -0,0 +1,43 @@
82239+#include <linux/kernel.h>
82240+#include <linux/sched.h>
82241+#include <linux/fs.h>
82242+#include <linux/file.h>
82243+#include <linux/grinternal.h>
82244+
82245+int
82246+gr_handle_follow_link(const struct inode *parent,
82247+ const struct inode *inode,
82248+ const struct dentry *dentry, const struct vfsmount *mnt)
82249+{
82250+#ifdef CONFIG_GRKERNSEC_LINK
82251+ const struct cred *cred = current_cred();
82252+
82253+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
82254+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
82255+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
82256+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
82257+ return -EACCES;
82258+ }
82259+#endif
82260+ return 0;
82261+}
82262+
82263+int
82264+gr_handle_hardlink(const struct dentry *dentry,
82265+ const struct vfsmount *mnt,
82266+ struct inode *inode, const int mode, const char *to)
82267+{
82268+#ifdef CONFIG_GRKERNSEC_LINK
82269+ const struct cred *cred = current_cred();
82270+
82271+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
82272+ (!S_ISREG(mode) || (mode & S_ISUID) ||
82273+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
82274+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
82275+ !capable(CAP_FOWNER) && cred->uid) {
82276+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
82277+ return -EPERM;
82278+ }
82279+#endif
82280+ return 0;
82281+}
82282diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
82283new file mode 100644
82284index 0000000..a45d2e9
82285--- /dev/null
82286+++ b/grsecurity/grsec_log.c
82287@@ -0,0 +1,322 @@
82288+#include <linux/kernel.h>
82289+#include <linux/sched.h>
82290+#include <linux/file.h>
82291+#include <linux/tty.h>
82292+#include <linux/fs.h>
82293+#include <linux/grinternal.h>
82294+
82295+#ifdef CONFIG_TREE_PREEMPT_RCU
82296+#define DISABLE_PREEMPT() preempt_disable()
82297+#define ENABLE_PREEMPT() preempt_enable()
82298+#else
82299+#define DISABLE_PREEMPT()
82300+#define ENABLE_PREEMPT()
82301+#endif
82302+
82303+#define BEGIN_LOCKS(x) \
82304+ DISABLE_PREEMPT(); \
82305+ rcu_read_lock(); \
82306+ read_lock(&tasklist_lock); \
82307+ read_lock(&grsec_exec_file_lock); \
82308+ if (x != GR_DO_AUDIT) \
82309+ spin_lock(&grsec_alert_lock); \
82310+ else \
82311+ spin_lock(&grsec_audit_lock)
82312+
82313+#define END_LOCKS(x) \
82314+ if (x != GR_DO_AUDIT) \
82315+ spin_unlock(&grsec_alert_lock); \
82316+ else \
82317+ spin_unlock(&grsec_audit_lock); \
82318+ read_unlock(&grsec_exec_file_lock); \
82319+ read_unlock(&tasklist_lock); \
82320+ rcu_read_unlock(); \
82321+ ENABLE_PREEMPT(); \
82322+ if (x == GR_DONT_AUDIT) \
82323+ gr_handle_alertkill(current)
82324+
82325+enum {
82326+ FLOODING,
82327+ NO_FLOODING
82328+};
82329+
82330+extern char *gr_alert_log_fmt;
82331+extern char *gr_audit_log_fmt;
82332+extern char *gr_alert_log_buf;
82333+extern char *gr_audit_log_buf;
82334+
82335+static int gr_log_start(int audit)
82336+{
82337+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
82338+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
82339+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82340+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
82341+ unsigned long curr_secs = get_seconds();
82342+
82343+ if (audit == GR_DO_AUDIT)
82344+ goto set_fmt;
82345+
82346+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
82347+ grsec_alert_wtime = curr_secs;
82348+ grsec_alert_fyet = 0;
82349+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
82350+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
82351+ grsec_alert_fyet++;
82352+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
82353+ grsec_alert_wtime = curr_secs;
82354+ grsec_alert_fyet++;
82355+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
82356+ return FLOODING;
82357+ }
82358+ else return FLOODING;
82359+
82360+set_fmt:
82361+#endif
82362+ memset(buf, 0, PAGE_SIZE);
82363+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
82364+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
82365+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82366+ } else if (current->signal->curr_ip) {
82367+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
82368+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
82369+ } else if (gr_acl_is_enabled()) {
82370+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
82371+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82372+ } else {
82373+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
82374+ strcpy(buf, fmt);
82375+ }
82376+
82377+ return NO_FLOODING;
82378+}
82379+
82380+static void gr_log_middle(int audit, const char *msg, va_list ap)
82381+ __attribute__ ((format (printf, 2, 0)));
82382+
82383+static void gr_log_middle(int audit, const char *msg, va_list ap)
82384+{
82385+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82386+ unsigned int len = strlen(buf);
82387+
82388+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82389+
82390+ return;
82391+}
82392+
82393+static void gr_log_middle_varargs(int audit, const char *msg, ...)
82394+ __attribute__ ((format (printf, 2, 3)));
82395+
82396+static void gr_log_middle_varargs(int audit, const char *msg, ...)
82397+{
82398+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82399+ unsigned int len = strlen(buf);
82400+ va_list ap;
82401+
82402+ va_start(ap, msg);
82403+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82404+ va_end(ap);
82405+
82406+ return;
82407+}
82408+
82409+static void gr_log_end(int audit, int append_default)
82410+{
82411+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82412+
82413+ if (append_default) {
82414+ unsigned int len = strlen(buf);
82415+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
82416+ }
82417+
82418+ printk("%s\n", buf);
82419+
82420+ return;
82421+}
82422+
82423+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
82424+{
82425+ int logtype;
82426+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
82427+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
82428+ void *voidptr = NULL;
82429+ int num1 = 0, num2 = 0;
82430+ unsigned long ulong1 = 0, ulong2 = 0;
82431+ struct dentry *dentry = NULL;
82432+ struct vfsmount *mnt = NULL;
82433+ struct file *file = NULL;
82434+ struct task_struct *task = NULL;
82435+ const struct cred *cred, *pcred;
82436+ va_list ap;
82437+
82438+ BEGIN_LOCKS(audit);
82439+ logtype = gr_log_start(audit);
82440+ if (logtype == FLOODING) {
82441+ END_LOCKS(audit);
82442+ return;
82443+ }
82444+ va_start(ap, argtypes);
82445+ switch (argtypes) {
82446+ case GR_TTYSNIFF:
82447+ task = va_arg(ap, struct task_struct *);
82448+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
82449+ break;
82450+ case GR_SYSCTL_HIDDEN:
82451+ str1 = va_arg(ap, char *);
82452+ gr_log_middle_varargs(audit, msg, result, str1);
82453+ break;
82454+ case GR_RBAC:
82455+ dentry = va_arg(ap, struct dentry *);
82456+ mnt = va_arg(ap, struct vfsmount *);
82457+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
82458+ break;
82459+ case GR_RBAC_STR:
82460+ dentry = va_arg(ap, struct dentry *);
82461+ mnt = va_arg(ap, struct vfsmount *);
82462+ str1 = va_arg(ap, char *);
82463+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
82464+ break;
82465+ case GR_STR_RBAC:
82466+ str1 = va_arg(ap, char *);
82467+ dentry = va_arg(ap, struct dentry *);
82468+ mnt = va_arg(ap, struct vfsmount *);
82469+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
82470+ break;
82471+ case GR_RBAC_MODE2:
82472+ dentry = va_arg(ap, struct dentry *);
82473+ mnt = va_arg(ap, struct vfsmount *);
82474+ str1 = va_arg(ap, char *);
82475+ str2 = va_arg(ap, char *);
82476+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
82477+ break;
82478+ case GR_RBAC_MODE3:
82479+ dentry = va_arg(ap, struct dentry *);
82480+ mnt = va_arg(ap, struct vfsmount *);
82481+ str1 = va_arg(ap, char *);
82482+ str2 = va_arg(ap, char *);
82483+ str3 = va_arg(ap, char *);
82484+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
82485+ break;
82486+ case GR_FILENAME:
82487+ dentry = va_arg(ap, struct dentry *);
82488+ mnt = va_arg(ap, struct vfsmount *);
82489+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
82490+ break;
82491+ case GR_STR_FILENAME:
82492+ str1 = va_arg(ap, char *);
82493+ dentry = va_arg(ap, struct dentry *);
82494+ mnt = va_arg(ap, struct vfsmount *);
82495+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
82496+ break;
82497+ case GR_FILENAME_STR:
82498+ dentry = va_arg(ap, struct dentry *);
82499+ mnt = va_arg(ap, struct vfsmount *);
82500+ str1 = va_arg(ap, char *);
82501+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
82502+ break;
82503+ case GR_FILENAME_TWO_INT:
82504+ dentry = va_arg(ap, struct dentry *);
82505+ mnt = va_arg(ap, struct vfsmount *);
82506+ num1 = va_arg(ap, int);
82507+ num2 = va_arg(ap, int);
82508+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
82509+ break;
82510+ case GR_FILENAME_TWO_INT_STR:
82511+ dentry = va_arg(ap, struct dentry *);
82512+ mnt = va_arg(ap, struct vfsmount *);
82513+ num1 = va_arg(ap, int);
82514+ num2 = va_arg(ap, int);
82515+ str1 = va_arg(ap, char *);
82516+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
82517+ break;
82518+ case GR_TEXTREL:
82519+ file = va_arg(ap, struct file *);
82520+ ulong1 = va_arg(ap, unsigned long);
82521+ ulong2 = va_arg(ap, unsigned long);
82522+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
82523+ break;
82524+ case GR_PTRACE:
82525+ task = va_arg(ap, struct task_struct *);
82526+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
82527+ break;
82528+ case GR_RESOURCE:
82529+ task = va_arg(ap, struct task_struct *);
82530+ cred = __task_cred(task);
82531+ pcred = __task_cred(task->real_parent);
82532+ ulong1 = va_arg(ap, unsigned long);
82533+ str1 = va_arg(ap, char *);
82534+ ulong2 = va_arg(ap, unsigned long);
82535+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82536+ break;
82537+ case GR_CAP:
82538+ task = va_arg(ap, struct task_struct *);
82539+ cred = __task_cred(task);
82540+ pcred = __task_cred(task->real_parent);
82541+ str1 = va_arg(ap, char *);
82542+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82543+ break;
82544+ case GR_SIG:
82545+ str1 = va_arg(ap, char *);
82546+ voidptr = va_arg(ap, void *);
82547+ gr_log_middle_varargs(audit, msg, str1, voidptr);
82548+ break;
82549+ case GR_SIG2:
82550+ task = va_arg(ap, struct task_struct *);
82551+ cred = __task_cred(task);
82552+ pcred = __task_cred(task->real_parent);
82553+ num1 = va_arg(ap, int);
82554+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82555+ break;
82556+ case GR_CRASH1:
82557+ task = va_arg(ap, struct task_struct *);
82558+ cred = __task_cred(task);
82559+ pcred = __task_cred(task->real_parent);
82560+ ulong1 = va_arg(ap, unsigned long);
82561+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
82562+ break;
82563+ case GR_CRASH2:
82564+ task = va_arg(ap, struct task_struct *);
82565+ cred = __task_cred(task);
82566+ pcred = __task_cred(task->real_parent);
82567+ ulong1 = va_arg(ap, unsigned long);
82568+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
82569+ break;
82570+ case GR_RWXMAP:
82571+ file = va_arg(ap, struct file *);
82572+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
82573+ break;
82574+ case GR_PSACCT:
82575+ {
82576+ unsigned int wday, cday;
82577+ __u8 whr, chr;
82578+ __u8 wmin, cmin;
82579+ __u8 wsec, csec;
82580+ char cur_tty[64] = { 0 };
82581+ char parent_tty[64] = { 0 };
82582+
82583+ task = va_arg(ap, struct task_struct *);
82584+ wday = va_arg(ap, unsigned int);
82585+ cday = va_arg(ap, unsigned int);
82586+ whr = va_arg(ap, int);
82587+ chr = va_arg(ap, int);
82588+ wmin = va_arg(ap, int);
82589+ cmin = va_arg(ap, int);
82590+ wsec = va_arg(ap, int);
82591+ csec = va_arg(ap, int);
82592+ ulong1 = va_arg(ap, unsigned long);
82593+ cred = __task_cred(task);
82594+ pcred = __task_cred(task->real_parent);
82595+
82596+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82597+ }
82598+ break;
82599+ default:
82600+ gr_log_middle(audit, msg, ap);
82601+ }
82602+ va_end(ap);
82603+ // these don't need DEFAULTSECARGS printed on the end
82604+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
82605+ gr_log_end(audit, 0);
82606+ else
82607+ gr_log_end(audit, 1);
82608+ END_LOCKS(audit);
82609+}
82610diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
82611new file mode 100644
82612index 0000000..f536303
82613--- /dev/null
82614+++ b/grsecurity/grsec_mem.c
82615@@ -0,0 +1,40 @@
82616+#include <linux/kernel.h>
82617+#include <linux/sched.h>
82618+#include <linux/mm.h>
82619+#include <linux/mman.h>
82620+#include <linux/grinternal.h>
82621+
82622+void
82623+gr_handle_ioperm(void)
82624+{
82625+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
82626+ return;
82627+}
82628+
82629+void
82630+gr_handle_iopl(void)
82631+{
82632+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
82633+ return;
82634+}
82635+
82636+void
82637+gr_handle_mem_readwrite(u64 from, u64 to)
82638+{
82639+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
82640+ return;
82641+}
82642+
82643+void
82644+gr_handle_vm86(void)
82645+{
82646+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
82647+ return;
82648+}
82649+
82650+void
82651+gr_log_badprocpid(const char *entry)
82652+{
82653+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
82654+ return;
82655+}
82656diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
82657new file mode 100644
82658index 0000000..2131422
82659--- /dev/null
82660+++ b/grsecurity/grsec_mount.c
82661@@ -0,0 +1,62 @@
82662+#include <linux/kernel.h>
82663+#include <linux/sched.h>
82664+#include <linux/mount.h>
82665+#include <linux/grsecurity.h>
82666+#include <linux/grinternal.h>
82667+
82668+void
82669+gr_log_remount(const char *devname, const int retval)
82670+{
82671+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82672+ if (grsec_enable_mount && (retval >= 0))
82673+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
82674+#endif
82675+ return;
82676+}
82677+
82678+void
82679+gr_log_unmount(const char *devname, const int retval)
82680+{
82681+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82682+ if (grsec_enable_mount && (retval >= 0))
82683+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
82684+#endif
82685+ return;
82686+}
82687+
82688+void
82689+gr_log_mount(const char *from, const char *to, const int retval)
82690+{
82691+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82692+ if (grsec_enable_mount && (retval >= 0))
82693+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
82694+#endif
82695+ return;
82696+}
82697+
82698+int
82699+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
82700+{
82701+#ifdef CONFIG_GRKERNSEC_ROFS
82702+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
82703+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
82704+ return -EPERM;
82705+ } else
82706+ return 0;
82707+#endif
82708+ return 0;
82709+}
82710+
82711+int
82712+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
82713+{
82714+#ifdef CONFIG_GRKERNSEC_ROFS
82715+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
82716+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
82717+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
82718+ return -EPERM;
82719+ } else
82720+ return 0;
82721+#endif
82722+ return 0;
82723+}
82724diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
82725new file mode 100644
82726index 0000000..a3b12a0
82727--- /dev/null
82728+++ b/grsecurity/grsec_pax.c
82729@@ -0,0 +1,36 @@
82730+#include <linux/kernel.h>
82731+#include <linux/sched.h>
82732+#include <linux/mm.h>
82733+#include <linux/file.h>
82734+#include <linux/grinternal.h>
82735+#include <linux/grsecurity.h>
82736+
82737+void
82738+gr_log_textrel(struct vm_area_struct * vma)
82739+{
82740+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82741+ if (grsec_enable_audit_textrel)
82742+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
82743+#endif
82744+ return;
82745+}
82746+
82747+void
82748+gr_log_rwxmmap(struct file *file)
82749+{
82750+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82751+ if (grsec_enable_log_rwxmaps)
82752+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
82753+#endif
82754+ return;
82755+}
82756+
82757+void
82758+gr_log_rwxmprotect(struct file *file)
82759+{
82760+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82761+ if (grsec_enable_log_rwxmaps)
82762+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
82763+#endif
82764+ return;
82765+}
82766diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
82767new file mode 100644
82768index 0000000..78f8733
82769--- /dev/null
82770+++ b/grsecurity/grsec_ptrace.c
82771@@ -0,0 +1,30 @@
82772+#include <linux/kernel.h>
82773+#include <linux/sched.h>
82774+#include <linux/grinternal.h>
82775+#include <linux/security.h>
82776+
82777+void
82778+gr_audit_ptrace(struct task_struct *task)
82779+{
82780+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82781+ if (grsec_enable_audit_ptrace)
82782+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
82783+#endif
82784+ return;
82785+}
82786+
82787+int
82788+gr_ptrace_readexec(struct file *file, int unsafe_flags)
82789+{
82790+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82791+ const struct dentry *dentry = file->f_path.dentry;
82792+ const struct vfsmount *mnt = file->f_path.mnt;
82793+
82794+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
82795+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
82796+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
82797+ return -EACCES;
82798+ }
82799+#endif
82800+ return 0;
82801+}
82802diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
82803new file mode 100644
82804index 0000000..c648492
82805--- /dev/null
82806+++ b/grsecurity/grsec_sig.c
82807@@ -0,0 +1,206 @@
82808+#include <linux/kernel.h>
82809+#include <linux/sched.h>
82810+#include <linux/delay.h>
82811+#include <linux/grsecurity.h>
82812+#include <linux/grinternal.h>
82813+#include <linux/hardirq.h>
82814+
82815+char *signames[] = {
82816+ [SIGSEGV] = "Segmentation fault",
82817+ [SIGILL] = "Illegal instruction",
82818+ [SIGABRT] = "Abort",
82819+ [SIGBUS] = "Invalid alignment/Bus error"
82820+};
82821+
82822+void
82823+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
82824+{
82825+#ifdef CONFIG_GRKERNSEC_SIGNAL
82826+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
82827+ (sig == SIGABRT) || (sig == SIGBUS))) {
82828+ if (t->pid == current->pid) {
82829+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
82830+ } else {
82831+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
82832+ }
82833+ }
82834+#endif
82835+ return;
82836+}
82837+
82838+int
82839+gr_handle_signal(const struct task_struct *p, const int sig)
82840+{
82841+#ifdef CONFIG_GRKERNSEC
82842+ /* ignore the 0 signal for protected task checks */
82843+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
82844+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
82845+ return -EPERM;
82846+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
82847+ return -EPERM;
82848+ }
82849+#endif
82850+ return 0;
82851+}
82852+
82853+#ifdef CONFIG_GRKERNSEC
82854+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
82855+
82856+int gr_fake_force_sig(int sig, struct task_struct *t)
82857+{
82858+ unsigned long int flags;
82859+ int ret, blocked, ignored;
82860+ struct k_sigaction *action;
82861+
82862+ spin_lock_irqsave(&t->sighand->siglock, flags);
82863+ action = &t->sighand->action[sig-1];
82864+ ignored = action->sa.sa_handler == SIG_IGN;
82865+ blocked = sigismember(&t->blocked, sig);
82866+ if (blocked || ignored) {
82867+ action->sa.sa_handler = SIG_DFL;
82868+ if (blocked) {
82869+ sigdelset(&t->blocked, sig);
82870+ recalc_sigpending_and_wake(t);
82871+ }
82872+ }
82873+ if (action->sa.sa_handler == SIG_DFL)
82874+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
82875+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
82876+
82877+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
82878+
82879+ return ret;
82880+}
82881+#endif
82882+
82883+#ifdef CONFIG_GRKERNSEC_BRUTE
82884+#define GR_USER_BAN_TIME (15 * 60)
82885+
82886+static int __get_dumpable(unsigned long mm_flags)
82887+{
82888+ int ret;
82889+
82890+ ret = mm_flags & MMF_DUMPABLE_MASK;
82891+ return (ret >= 2) ? 2 : ret;
82892+}
82893+#endif
82894+
82895+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
82896+{
82897+#ifdef CONFIG_GRKERNSEC_BRUTE
82898+ uid_t uid = 0;
82899+
82900+ if (!grsec_enable_brute)
82901+ return;
82902+
82903+ rcu_read_lock();
82904+ read_lock(&tasklist_lock);
82905+ read_lock(&grsec_exec_file_lock);
82906+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
82907+ p->real_parent->brute = 1;
82908+ else {
82909+ const struct cred *cred = __task_cred(p), *cred2;
82910+ struct task_struct *tsk, *tsk2;
82911+
82912+ if (!__get_dumpable(mm_flags) && cred->uid) {
82913+ struct user_struct *user;
82914+
82915+ uid = cred->uid;
82916+
82917+ /* this is put upon execution past expiration */
82918+ user = find_user(uid);
82919+ if (user == NULL)
82920+ goto unlock;
82921+ user->banned = 1;
82922+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
82923+ if (user->ban_expires == ~0UL)
82924+ user->ban_expires--;
82925+
82926+ do_each_thread(tsk2, tsk) {
82927+ cred2 = __task_cred(tsk);
82928+ if (tsk != p && cred2->uid == uid)
82929+ gr_fake_force_sig(SIGKILL, tsk);
82930+ } while_each_thread(tsk2, tsk);
82931+ }
82932+ }
82933+unlock:
82934+ read_unlock(&grsec_exec_file_lock);
82935+ read_unlock(&tasklist_lock);
82936+ rcu_read_unlock();
82937+
82938+ if (uid)
82939+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
82940+#endif
82941+ return;
82942+}
82943+
82944+void gr_handle_brute_check(void)
82945+{
82946+#ifdef CONFIG_GRKERNSEC_BRUTE
82947+ if (current->brute)
82948+ msleep(30 * 1000);
82949+#endif
82950+ return;
82951+}
82952+
82953+void gr_handle_kernel_exploit(void)
82954+{
82955+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
82956+ const struct cred *cred;
82957+ struct task_struct *tsk, *tsk2;
82958+ struct user_struct *user;
82959+ uid_t uid;
82960+
82961+ if (in_irq() || in_serving_softirq() || in_nmi())
82962+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
82963+
82964+ uid = current_uid();
82965+
82966+ if (uid == 0)
82967+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
82968+ else {
82969+ /* kill all the processes of this user, hold a reference
82970+ to their creds struct, and prevent them from creating
82971+ another process until system reset
82972+ */
82973+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
82974+ /* we intentionally leak this ref */
82975+ user = get_uid(current->cred->user);
82976+ if (user) {
82977+ user->banned = 1;
82978+ user->ban_expires = ~0UL;
82979+ }
82980+
82981+ read_lock(&tasklist_lock);
82982+ do_each_thread(tsk2, tsk) {
82983+ cred = __task_cred(tsk);
82984+ if (cred->uid == uid)
82985+ gr_fake_force_sig(SIGKILL, tsk);
82986+ } while_each_thread(tsk2, tsk);
82987+ read_unlock(&tasklist_lock);
82988+ }
82989+#endif
82990+}
82991+
82992+int __gr_process_user_ban(struct user_struct *user)
82993+{
82994+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82995+ if (unlikely(user->banned)) {
82996+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
82997+ user->banned = 0;
82998+ user->ban_expires = 0;
82999+ free_uid(user);
83000+ } else
83001+ return -EPERM;
83002+ }
83003+#endif
83004+ return 0;
83005+}
83006+
83007+int gr_process_user_ban(void)
83008+{
83009+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
83010+ return __gr_process_user_ban(current->cred->user);
83011+#endif
83012+ return 0;
83013+}
83014diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
83015new file mode 100644
83016index 0000000..7512ea9
83017--- /dev/null
83018+++ b/grsecurity/grsec_sock.c
83019@@ -0,0 +1,275 @@
83020+#include <linux/kernel.h>
83021+#include <linux/module.h>
83022+#include <linux/sched.h>
83023+#include <linux/file.h>
83024+#include <linux/net.h>
83025+#include <linux/in.h>
83026+#include <linux/ip.h>
83027+#include <net/sock.h>
83028+#include <net/inet_sock.h>
83029+#include <linux/grsecurity.h>
83030+#include <linux/grinternal.h>
83031+#include <linux/gracl.h>
83032+
83033+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
83034+EXPORT_SYMBOL(gr_cap_rtnetlink);
83035+
83036+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
83037+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
83038+
83039+EXPORT_SYMBOL(gr_search_udp_recvmsg);
83040+EXPORT_SYMBOL(gr_search_udp_sendmsg);
83041+
83042+#ifdef CONFIG_UNIX_MODULE
83043+EXPORT_SYMBOL(gr_acl_handle_unix);
83044+EXPORT_SYMBOL(gr_acl_handle_mknod);
83045+EXPORT_SYMBOL(gr_handle_chroot_unix);
83046+EXPORT_SYMBOL(gr_handle_create);
83047+#endif
83048+
83049+#ifdef CONFIG_GRKERNSEC
83050+#define gr_conn_table_size 32749
83051+struct conn_table_entry {
83052+ struct conn_table_entry *next;
83053+ struct signal_struct *sig;
83054+};
83055+
83056+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
83057+DEFINE_SPINLOCK(gr_conn_table_lock);
83058+
83059+extern const char * gr_socktype_to_name(unsigned char type);
83060+extern const char * gr_proto_to_name(unsigned char proto);
83061+extern const char * gr_sockfamily_to_name(unsigned char family);
83062+
83063+static __inline__ int
83064+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
83065+{
83066+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
83067+}
83068+
83069+static __inline__ int
83070+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
83071+ __u16 sport, __u16 dport)
83072+{
83073+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
83074+ sig->gr_sport == sport && sig->gr_dport == dport))
83075+ return 1;
83076+ else
83077+ return 0;
83078+}
83079+
83080+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
83081+{
83082+ struct conn_table_entry **match;
83083+ unsigned int index;
83084+
83085+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
83086+ sig->gr_sport, sig->gr_dport,
83087+ gr_conn_table_size);
83088+
83089+ newent->sig = sig;
83090+
83091+ match = &gr_conn_table[index];
83092+ newent->next = *match;
83093+ *match = newent;
83094+
83095+ return;
83096+}
83097+
83098+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
83099+{
83100+ struct conn_table_entry *match, *last = NULL;
83101+ unsigned int index;
83102+
83103+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
83104+ sig->gr_sport, sig->gr_dport,
83105+ gr_conn_table_size);
83106+
83107+ match = gr_conn_table[index];
83108+ while (match && !conn_match(match->sig,
83109+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
83110+ sig->gr_dport)) {
83111+ last = match;
83112+ match = match->next;
83113+ }
83114+
83115+ if (match) {
83116+ if (last)
83117+ last->next = match->next;
83118+ else
83119+ gr_conn_table[index] = NULL;
83120+ kfree(match);
83121+ }
83122+
83123+ return;
83124+}
83125+
83126+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
83127+ __u16 sport, __u16 dport)
83128+{
83129+ struct conn_table_entry *match;
83130+ unsigned int index;
83131+
83132+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
83133+
83134+ match = gr_conn_table[index];
83135+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
83136+ match = match->next;
83137+
83138+ if (match)
83139+ return match->sig;
83140+ else
83141+ return NULL;
83142+}
83143+
83144+#endif
83145+
83146+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
83147+{
83148+#ifdef CONFIG_GRKERNSEC
83149+ struct signal_struct *sig = task->signal;
83150+ struct conn_table_entry *newent;
83151+
83152+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
83153+ if (newent == NULL)
83154+ return;
83155+ /* no bh lock needed since we are called with bh disabled */
83156+ spin_lock(&gr_conn_table_lock);
83157+ gr_del_task_from_ip_table_nolock(sig);
83158+ sig->gr_saddr = inet->rcv_saddr;
83159+ sig->gr_daddr = inet->daddr;
83160+ sig->gr_sport = inet->sport;
83161+ sig->gr_dport = inet->dport;
83162+ gr_add_to_task_ip_table_nolock(sig, newent);
83163+ spin_unlock(&gr_conn_table_lock);
83164+#endif
83165+ return;
83166+}
83167+
83168+void gr_del_task_from_ip_table(struct task_struct *task)
83169+{
83170+#ifdef CONFIG_GRKERNSEC
83171+ spin_lock_bh(&gr_conn_table_lock);
83172+ gr_del_task_from_ip_table_nolock(task->signal);
83173+ spin_unlock_bh(&gr_conn_table_lock);
83174+#endif
83175+ return;
83176+}
83177+
83178+void
83179+gr_attach_curr_ip(const struct sock *sk)
83180+{
83181+#ifdef CONFIG_GRKERNSEC
83182+ struct signal_struct *p, *set;
83183+ const struct inet_sock *inet = inet_sk(sk);
83184+
83185+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
83186+ return;
83187+
83188+ set = current->signal;
83189+
83190+ spin_lock_bh(&gr_conn_table_lock);
83191+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
83192+ inet->dport, inet->sport);
83193+ if (unlikely(p != NULL)) {
83194+ set->curr_ip = p->curr_ip;
83195+ set->used_accept = 1;
83196+ gr_del_task_from_ip_table_nolock(p);
83197+ spin_unlock_bh(&gr_conn_table_lock);
83198+ return;
83199+ }
83200+ spin_unlock_bh(&gr_conn_table_lock);
83201+
83202+ set->curr_ip = inet->daddr;
83203+ set->used_accept = 1;
83204+#endif
83205+ return;
83206+}
83207+
83208+int
83209+gr_handle_sock_all(const int family, const int type, const int protocol)
83210+{
83211+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83212+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
83213+ (family != AF_UNIX)) {
83214+ if (family == AF_INET)
83215+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
83216+ else
83217+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
83218+ return -EACCES;
83219+ }
83220+#endif
83221+ return 0;
83222+}
83223+
83224+int
83225+gr_handle_sock_server(const struct sockaddr *sck)
83226+{
83227+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83228+ if (grsec_enable_socket_server &&
83229+ in_group_p(grsec_socket_server_gid) &&
83230+ sck && (sck->sa_family != AF_UNIX) &&
83231+ (sck->sa_family != AF_LOCAL)) {
83232+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83233+ return -EACCES;
83234+ }
83235+#endif
83236+ return 0;
83237+}
83238+
83239+int
83240+gr_handle_sock_server_other(const struct sock *sck)
83241+{
83242+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83243+ if (grsec_enable_socket_server &&
83244+ in_group_p(grsec_socket_server_gid) &&
83245+ sck && (sck->sk_family != AF_UNIX) &&
83246+ (sck->sk_family != AF_LOCAL)) {
83247+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83248+ return -EACCES;
83249+ }
83250+#endif
83251+ return 0;
83252+}
83253+
83254+int
83255+gr_handle_sock_client(const struct sockaddr *sck)
83256+{
83257+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83258+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
83259+ sck && (sck->sa_family != AF_UNIX) &&
83260+ (sck->sa_family != AF_LOCAL)) {
83261+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
83262+ return -EACCES;
83263+ }
83264+#endif
83265+ return 0;
83266+}
83267+
83268+kernel_cap_t
83269+gr_cap_rtnetlink(struct sock *sock)
83270+{
83271+#ifdef CONFIG_GRKERNSEC
83272+ if (!gr_acl_is_enabled())
83273+ return current_cap();
83274+ else if (sock->sk_protocol == NETLINK_ISCSI &&
83275+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
83276+ gr_is_capable(CAP_SYS_ADMIN))
83277+ return current_cap();
83278+ else if (sock->sk_protocol == NETLINK_AUDIT &&
83279+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
83280+ gr_is_capable(CAP_AUDIT_WRITE) &&
83281+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
83282+ gr_is_capable(CAP_AUDIT_CONTROL))
83283+ return current_cap();
83284+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
83285+ ((sock->sk_protocol == NETLINK_ROUTE) ?
83286+ gr_is_capable_nolog(CAP_NET_ADMIN) :
83287+ gr_is_capable(CAP_NET_ADMIN)))
83288+ return current_cap();
83289+ else
83290+ return __cap_empty_set;
83291+#else
83292+ return current_cap();
83293+#endif
83294+}
83295diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
83296new file mode 100644
83297index 0000000..31f3258
83298--- /dev/null
83299+++ b/grsecurity/grsec_sysctl.c
83300@@ -0,0 +1,499 @@
83301+#include <linux/kernel.h>
83302+#include <linux/sched.h>
83303+#include <linux/sysctl.h>
83304+#include <linux/grsecurity.h>
83305+#include <linux/grinternal.h>
83306+
83307+int
83308+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
83309+{
83310+#ifdef CONFIG_GRKERNSEC_SYSCTL
83311+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
83312+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
83313+ return -EACCES;
83314+ }
83315+#endif
83316+ return 0;
83317+}
83318+
83319+#ifdef CONFIG_GRKERNSEC_ROFS
83320+static int __maybe_unused one = 1;
83321+#endif
83322+
83323+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
83324+ctl_table grsecurity_table[] = {
83325+#ifdef CONFIG_GRKERNSEC_SYSCTL
83326+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
83327+#ifdef CONFIG_GRKERNSEC_IO
83328+ {
83329+ .ctl_name = CTL_UNNUMBERED,
83330+ .procname = "disable_priv_io",
83331+ .data = &grsec_disable_privio,
83332+ .maxlen = sizeof(int),
83333+ .mode = 0600,
83334+ .proc_handler = &proc_dointvec,
83335+ },
83336+#endif
83337+#endif
83338+#ifdef CONFIG_GRKERNSEC_LINK
83339+ {
83340+ .ctl_name = CTL_UNNUMBERED,
83341+ .procname = "linking_restrictions",
83342+ .data = &grsec_enable_link,
83343+ .maxlen = sizeof(int),
83344+ .mode = 0600,
83345+ .proc_handler = &proc_dointvec,
83346+ },
83347+#endif
83348+#ifdef CONFIG_GRKERNSEC_BRUTE
83349+ {
83350+ .ctl_name = CTL_UNNUMBERED,
83351+ .procname = "deter_bruteforce",
83352+ .data = &grsec_enable_brute,
83353+ .maxlen = sizeof(int),
83354+ .mode = 0600,
83355+ .proc_handler = &proc_dointvec,
83356+ },
83357+#endif
83358+#ifdef CONFIG_GRKERNSEC_FIFO
83359+ {
83360+ .ctl_name = CTL_UNNUMBERED,
83361+ .procname = "fifo_restrictions",
83362+ .data = &grsec_enable_fifo,
83363+ .maxlen = sizeof(int),
83364+ .mode = 0600,
83365+ .proc_handler = &proc_dointvec,
83366+ },
83367+#endif
83368+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
83369+ {
83370+ .ctl_name = CTL_UNNUMBERED,
83371+ .procname = "ptrace_readexec",
83372+ .data = &grsec_enable_ptrace_readexec,
83373+ .maxlen = sizeof(int),
83374+ .mode = 0600,
83375+ .proc_handler = &proc_dointvec,
83376+ },
83377+#endif
83378+#ifdef CONFIG_GRKERNSEC_SETXID
83379+ {
83380+ .ctl_name = CTL_UNNUMBERED,
83381+ .procname = "consistent_setxid",
83382+ .data = &grsec_enable_setxid,
83383+ .maxlen = sizeof(int),
83384+ .mode = 0600,
83385+ .proc_handler = &proc_dointvec,
83386+ },
83387+#endif
83388+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83389+ {
83390+ .ctl_name = CTL_UNNUMBERED,
83391+ .procname = "ip_blackhole",
83392+ .data = &grsec_enable_blackhole,
83393+ .maxlen = sizeof(int),
83394+ .mode = 0600,
83395+ .proc_handler = &proc_dointvec,
83396+ },
83397+ {
83398+ .ctl_name = CTL_UNNUMBERED,
83399+ .procname = "lastack_retries",
83400+ .data = &grsec_lastack_retries,
83401+ .maxlen = sizeof(int),
83402+ .mode = 0600,
83403+ .proc_handler = &proc_dointvec,
83404+ },
83405+#endif
83406+#ifdef CONFIG_GRKERNSEC_EXECLOG
83407+ {
83408+ .ctl_name = CTL_UNNUMBERED,
83409+ .procname = "exec_logging",
83410+ .data = &grsec_enable_execlog,
83411+ .maxlen = sizeof(int),
83412+ .mode = 0600,
83413+ .proc_handler = &proc_dointvec,
83414+ },
83415+#endif
83416+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
83417+ {
83418+ .ctl_name = CTL_UNNUMBERED,
83419+ .procname = "rwxmap_logging",
83420+ .data = &grsec_enable_log_rwxmaps,
83421+ .maxlen = sizeof(int),
83422+ .mode = 0600,
83423+ .proc_handler = &proc_dointvec,
83424+ },
83425+#endif
83426+#ifdef CONFIG_GRKERNSEC_SIGNAL
83427+ {
83428+ .ctl_name = CTL_UNNUMBERED,
83429+ .procname = "signal_logging",
83430+ .data = &grsec_enable_signal,
83431+ .maxlen = sizeof(int),
83432+ .mode = 0600,
83433+ .proc_handler = &proc_dointvec,
83434+ },
83435+#endif
83436+#ifdef CONFIG_GRKERNSEC_FORKFAIL
83437+ {
83438+ .ctl_name = CTL_UNNUMBERED,
83439+ .procname = "forkfail_logging",
83440+ .data = &grsec_enable_forkfail,
83441+ .maxlen = sizeof(int),
83442+ .mode = 0600,
83443+ .proc_handler = &proc_dointvec,
83444+ },
83445+#endif
83446+#ifdef CONFIG_GRKERNSEC_TIME
83447+ {
83448+ .ctl_name = CTL_UNNUMBERED,
83449+ .procname = "timechange_logging",
83450+ .data = &grsec_enable_time,
83451+ .maxlen = sizeof(int),
83452+ .mode = 0600,
83453+ .proc_handler = &proc_dointvec,
83454+ },
83455+#endif
83456+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
83457+ {
83458+ .ctl_name = CTL_UNNUMBERED,
83459+ .procname = "chroot_deny_shmat",
83460+ .data = &grsec_enable_chroot_shmat,
83461+ .maxlen = sizeof(int),
83462+ .mode = 0600,
83463+ .proc_handler = &proc_dointvec,
83464+ },
83465+#endif
83466+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
83467+ {
83468+ .ctl_name = CTL_UNNUMBERED,
83469+ .procname = "chroot_deny_unix",
83470+ .data = &grsec_enable_chroot_unix,
83471+ .maxlen = sizeof(int),
83472+ .mode = 0600,
83473+ .proc_handler = &proc_dointvec,
83474+ },
83475+#endif
83476+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
83477+ {
83478+ .ctl_name = CTL_UNNUMBERED,
83479+ .procname = "chroot_deny_mount",
83480+ .data = &grsec_enable_chroot_mount,
83481+ .maxlen = sizeof(int),
83482+ .mode = 0600,
83483+ .proc_handler = &proc_dointvec,
83484+ },
83485+#endif
83486+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
83487+ {
83488+ .ctl_name = CTL_UNNUMBERED,
83489+ .procname = "chroot_deny_fchdir",
83490+ .data = &grsec_enable_chroot_fchdir,
83491+ .maxlen = sizeof(int),
83492+ .mode = 0600,
83493+ .proc_handler = &proc_dointvec,
83494+ },
83495+#endif
83496+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
83497+ {
83498+ .ctl_name = CTL_UNNUMBERED,
83499+ .procname = "chroot_deny_chroot",
83500+ .data = &grsec_enable_chroot_double,
83501+ .maxlen = sizeof(int),
83502+ .mode = 0600,
83503+ .proc_handler = &proc_dointvec,
83504+ },
83505+#endif
83506+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
83507+ {
83508+ .ctl_name = CTL_UNNUMBERED,
83509+ .procname = "chroot_deny_pivot",
83510+ .data = &grsec_enable_chroot_pivot,
83511+ .maxlen = sizeof(int),
83512+ .mode = 0600,
83513+ .proc_handler = &proc_dointvec,
83514+ },
83515+#endif
83516+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
83517+ {
83518+ .ctl_name = CTL_UNNUMBERED,
83519+ .procname = "chroot_enforce_chdir",
83520+ .data = &grsec_enable_chroot_chdir,
83521+ .maxlen = sizeof(int),
83522+ .mode = 0600,
83523+ .proc_handler = &proc_dointvec,
83524+ },
83525+#endif
83526+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
83527+ {
83528+ .ctl_name = CTL_UNNUMBERED,
83529+ .procname = "chroot_deny_chmod",
83530+ .data = &grsec_enable_chroot_chmod,
83531+ .maxlen = sizeof(int),
83532+ .mode = 0600,
83533+ .proc_handler = &proc_dointvec,
83534+ },
83535+#endif
83536+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
83537+ {
83538+ .ctl_name = CTL_UNNUMBERED,
83539+ .procname = "chroot_deny_mknod",
83540+ .data = &grsec_enable_chroot_mknod,
83541+ .maxlen = sizeof(int),
83542+ .mode = 0600,
83543+ .proc_handler = &proc_dointvec,
83544+ },
83545+#endif
83546+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
83547+ {
83548+ .ctl_name = CTL_UNNUMBERED,
83549+ .procname = "chroot_restrict_nice",
83550+ .data = &grsec_enable_chroot_nice,
83551+ .maxlen = sizeof(int),
83552+ .mode = 0600,
83553+ .proc_handler = &proc_dointvec,
83554+ },
83555+#endif
83556+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
83557+ {
83558+ .ctl_name = CTL_UNNUMBERED,
83559+ .procname = "chroot_execlog",
83560+ .data = &grsec_enable_chroot_execlog,
83561+ .maxlen = sizeof(int),
83562+ .mode = 0600,
83563+ .proc_handler = &proc_dointvec,
83564+ },
83565+#endif
83566+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
83567+ {
83568+ .ctl_name = CTL_UNNUMBERED,
83569+ .procname = "chroot_caps",
83570+ .data = &grsec_enable_chroot_caps,
83571+ .maxlen = sizeof(int),
83572+ .mode = 0600,
83573+ .proc_handler = &proc_dointvec,
83574+ },
83575+#endif
83576+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
83577+ {
83578+ .ctl_name = CTL_UNNUMBERED,
83579+ .procname = "chroot_deny_sysctl",
83580+ .data = &grsec_enable_chroot_sysctl,
83581+ .maxlen = sizeof(int),
83582+ .mode = 0600,
83583+ .proc_handler = &proc_dointvec,
83584+ },
83585+#endif
83586+#ifdef CONFIG_GRKERNSEC_TPE
83587+ {
83588+ .ctl_name = CTL_UNNUMBERED,
83589+ .procname = "tpe",
83590+ .data = &grsec_enable_tpe,
83591+ .maxlen = sizeof(int),
83592+ .mode = 0600,
83593+ .proc_handler = &proc_dointvec,
83594+ },
83595+ {
83596+ .ctl_name = CTL_UNNUMBERED,
83597+ .procname = "tpe_gid",
83598+ .data = &grsec_tpe_gid,
83599+ .maxlen = sizeof(int),
83600+ .mode = 0600,
83601+ .proc_handler = &proc_dointvec,
83602+ },
83603+#endif
83604+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83605+ {
83606+ .ctl_name = CTL_UNNUMBERED,
83607+ .procname = "tpe_invert",
83608+ .data = &grsec_enable_tpe_invert,
83609+ .maxlen = sizeof(int),
83610+ .mode = 0600,
83611+ .proc_handler = &proc_dointvec,
83612+ },
83613+#endif
83614+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83615+ {
83616+ .ctl_name = CTL_UNNUMBERED,
83617+ .procname = "tpe_restrict_all",
83618+ .data = &grsec_enable_tpe_all,
83619+ .maxlen = sizeof(int),
83620+ .mode = 0600,
83621+ .proc_handler = &proc_dointvec,
83622+ },
83623+#endif
83624+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83625+ {
83626+ .ctl_name = CTL_UNNUMBERED,
83627+ .procname = "socket_all",
83628+ .data = &grsec_enable_socket_all,
83629+ .maxlen = sizeof(int),
83630+ .mode = 0600,
83631+ .proc_handler = &proc_dointvec,
83632+ },
83633+ {
83634+ .ctl_name = CTL_UNNUMBERED,
83635+ .procname = "socket_all_gid",
83636+ .data = &grsec_socket_all_gid,
83637+ .maxlen = sizeof(int),
83638+ .mode = 0600,
83639+ .proc_handler = &proc_dointvec,
83640+ },
83641+#endif
83642+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83643+ {
83644+ .ctl_name = CTL_UNNUMBERED,
83645+ .procname = "socket_client",
83646+ .data = &grsec_enable_socket_client,
83647+ .maxlen = sizeof(int),
83648+ .mode = 0600,
83649+ .proc_handler = &proc_dointvec,
83650+ },
83651+ {
83652+ .ctl_name = CTL_UNNUMBERED,
83653+ .procname = "socket_client_gid",
83654+ .data = &grsec_socket_client_gid,
83655+ .maxlen = sizeof(int),
83656+ .mode = 0600,
83657+ .proc_handler = &proc_dointvec,
83658+ },
83659+#endif
83660+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83661+ {
83662+ .ctl_name = CTL_UNNUMBERED,
83663+ .procname = "socket_server",
83664+ .data = &grsec_enable_socket_server,
83665+ .maxlen = sizeof(int),
83666+ .mode = 0600,
83667+ .proc_handler = &proc_dointvec,
83668+ },
83669+ {
83670+ .ctl_name = CTL_UNNUMBERED,
83671+ .procname = "socket_server_gid",
83672+ .data = &grsec_socket_server_gid,
83673+ .maxlen = sizeof(int),
83674+ .mode = 0600,
83675+ .proc_handler = &proc_dointvec,
83676+ },
83677+#endif
83678+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
83679+ {
83680+ .ctl_name = CTL_UNNUMBERED,
83681+ .procname = "audit_group",
83682+ .data = &grsec_enable_group,
83683+ .maxlen = sizeof(int),
83684+ .mode = 0600,
83685+ .proc_handler = &proc_dointvec,
83686+ },
83687+ {
83688+ .ctl_name = CTL_UNNUMBERED,
83689+ .procname = "audit_gid",
83690+ .data = &grsec_audit_gid,
83691+ .maxlen = sizeof(int),
83692+ .mode = 0600,
83693+ .proc_handler = &proc_dointvec,
83694+ },
83695+#endif
83696+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
83697+ {
83698+ .ctl_name = CTL_UNNUMBERED,
83699+ .procname = "audit_chdir",
83700+ .data = &grsec_enable_chdir,
83701+ .maxlen = sizeof(int),
83702+ .mode = 0600,
83703+ .proc_handler = &proc_dointvec,
83704+ },
83705+#endif
83706+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
83707+ {
83708+ .ctl_name = CTL_UNNUMBERED,
83709+ .procname = "audit_mount",
83710+ .data = &grsec_enable_mount,
83711+ .maxlen = sizeof(int),
83712+ .mode = 0600,
83713+ .proc_handler = &proc_dointvec,
83714+ },
83715+#endif
83716+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
83717+ {
83718+ .ctl_name = CTL_UNNUMBERED,
83719+ .procname = "audit_textrel",
83720+ .data = &grsec_enable_audit_textrel,
83721+ .maxlen = sizeof(int),
83722+ .mode = 0600,
83723+ .proc_handler = &proc_dointvec,
83724+ },
83725+#endif
83726+#ifdef CONFIG_GRKERNSEC_DMESG
83727+ {
83728+ .ctl_name = CTL_UNNUMBERED,
83729+ .procname = "dmesg",
83730+ .data = &grsec_enable_dmesg,
83731+ .maxlen = sizeof(int),
83732+ .mode = 0600,
83733+ .proc_handler = &proc_dointvec,
83734+ },
83735+#endif
83736+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83737+ {
83738+ .ctl_name = CTL_UNNUMBERED,
83739+ .procname = "chroot_findtask",
83740+ .data = &grsec_enable_chroot_findtask,
83741+ .maxlen = sizeof(int),
83742+ .mode = 0600,
83743+ .proc_handler = &proc_dointvec,
83744+ },
83745+#endif
83746+#ifdef CONFIG_GRKERNSEC_RESLOG
83747+ {
83748+ .ctl_name = CTL_UNNUMBERED,
83749+ .procname = "resource_logging",
83750+ .data = &grsec_resource_logging,
83751+ .maxlen = sizeof(int),
83752+ .mode = 0600,
83753+ .proc_handler = &proc_dointvec,
83754+ },
83755+#endif
83756+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
83757+ {
83758+ .ctl_name = CTL_UNNUMBERED,
83759+ .procname = "audit_ptrace",
83760+ .data = &grsec_enable_audit_ptrace,
83761+ .maxlen = sizeof(int),
83762+ .mode = 0600,
83763+ .proc_handler = &proc_dointvec,
83764+ },
83765+#endif
83766+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
83767+ {
83768+ .ctl_name = CTL_UNNUMBERED,
83769+ .procname = "harden_ptrace",
83770+ .data = &grsec_enable_harden_ptrace,
83771+ .maxlen = sizeof(int),
83772+ .mode = 0600,
83773+ .proc_handler = &proc_dointvec,
83774+ },
83775+#endif
83776+ {
83777+ .ctl_name = CTL_UNNUMBERED,
83778+ .procname = "grsec_lock",
83779+ .data = &grsec_lock,
83780+ .maxlen = sizeof(int),
83781+ .mode = 0600,
83782+ .proc_handler = &proc_dointvec,
83783+ },
83784+#endif
83785+#ifdef CONFIG_GRKERNSEC_ROFS
83786+ {
83787+ .ctl_name = CTL_UNNUMBERED,
83788+ .procname = "romount_protect",
83789+ .data = &grsec_enable_rofs,
83790+ .maxlen = sizeof(int),
83791+ .mode = 0600,
83792+ .proc_handler = &proc_dointvec_minmax,
83793+ .extra1 = &one,
83794+ .extra2 = &one,
83795+ },
83796+#endif
83797+ { .ctl_name = 0 }
83798+};
83799+#endif
83800diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
83801new file mode 100644
83802index 0000000..0dc13c3
83803--- /dev/null
83804+++ b/grsecurity/grsec_time.c
83805@@ -0,0 +1,16 @@
83806+#include <linux/kernel.h>
83807+#include <linux/sched.h>
83808+#include <linux/grinternal.h>
83809+#include <linux/module.h>
83810+
83811+void
83812+gr_log_timechange(void)
83813+{
83814+#ifdef CONFIG_GRKERNSEC_TIME
83815+ if (grsec_enable_time)
83816+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
83817+#endif
83818+ return;
83819+}
83820+
83821+EXPORT_SYMBOL(gr_log_timechange);
83822diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
83823new file mode 100644
83824index 0000000..07e0dc0
83825--- /dev/null
83826+++ b/grsecurity/grsec_tpe.c
83827@@ -0,0 +1,73 @@
83828+#include <linux/kernel.h>
83829+#include <linux/sched.h>
83830+#include <linux/file.h>
83831+#include <linux/fs.h>
83832+#include <linux/grinternal.h>
83833+
83834+extern int gr_acl_tpe_check(void);
83835+
83836+int
83837+gr_tpe_allow(const struct file *file)
83838+{
83839+#ifdef CONFIG_GRKERNSEC
83840+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
83841+ const struct cred *cred = current_cred();
83842+ char *msg = NULL;
83843+ char *msg2 = NULL;
83844+
83845+ // never restrict root
83846+ if (!cred->uid)
83847+ return 1;
83848+
83849+ if (grsec_enable_tpe) {
83850+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83851+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
83852+ msg = "not being in trusted group";
83853+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
83854+ msg = "being in untrusted group";
83855+#else
83856+ if (in_group_p(grsec_tpe_gid))
83857+ msg = "being in untrusted group";
83858+#endif
83859+ }
83860+ if (!msg && gr_acl_tpe_check())
83861+ msg = "being in untrusted role";
83862+
83863+ // not in any affected group/role
83864+ if (!msg)
83865+ goto next_check;
83866+
83867+ if (inode->i_uid)
83868+ msg2 = "file in non-root-owned directory";
83869+ else if (inode->i_mode & S_IWOTH)
83870+ msg2 = "file in world-writable directory";
83871+ else if (inode->i_mode & S_IWGRP)
83872+ msg2 = "file in group-writable directory";
83873+
83874+ if (msg && msg2) {
83875+ char fullmsg[70] = {0};
83876+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
83877+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
83878+ return 0;
83879+ }
83880+ msg = NULL;
83881+next_check:
83882+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83883+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
83884+ return 1;
83885+
83886+ if (inode->i_uid && (inode->i_uid != cred->uid))
83887+ msg = "directory not owned by user";
83888+ else if (inode->i_mode & S_IWOTH)
83889+ msg = "file in world-writable directory";
83890+ else if (inode->i_mode & S_IWGRP)
83891+ msg = "file in group-writable directory";
83892+
83893+ if (msg) {
83894+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
83895+ return 0;
83896+ }
83897+#endif
83898+#endif
83899+ return 1;
83900+}
83901diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
83902new file mode 100644
83903index 0000000..9f7b1ac
83904--- /dev/null
83905+++ b/grsecurity/grsum.c
83906@@ -0,0 +1,61 @@
83907+#include <linux/err.h>
83908+#include <linux/kernel.h>
83909+#include <linux/sched.h>
83910+#include <linux/mm.h>
83911+#include <linux/scatterlist.h>
83912+#include <linux/crypto.h>
83913+#include <linux/gracl.h>
83914+
83915+
83916+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
83917+#error "crypto and sha256 must be built into the kernel"
83918+#endif
83919+
83920+int
83921+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
83922+{
83923+ char *p;
83924+ struct crypto_hash *tfm;
83925+ struct hash_desc desc;
83926+ struct scatterlist sg;
83927+ unsigned char temp_sum[GR_SHA_LEN];
83928+ volatile int retval = 0;
83929+ volatile int dummy = 0;
83930+ unsigned int i;
83931+
83932+ sg_init_table(&sg, 1);
83933+
83934+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
83935+ if (IS_ERR(tfm)) {
83936+ /* should never happen, since sha256 should be built in */
83937+ return 1;
83938+ }
83939+
83940+ desc.tfm = tfm;
83941+ desc.flags = 0;
83942+
83943+ crypto_hash_init(&desc);
83944+
83945+ p = salt;
83946+ sg_set_buf(&sg, p, GR_SALT_LEN);
83947+ crypto_hash_update(&desc, &sg, sg.length);
83948+
83949+ p = entry->pw;
83950+ sg_set_buf(&sg, p, strlen(p));
83951+
83952+ crypto_hash_update(&desc, &sg, sg.length);
83953+
83954+ crypto_hash_final(&desc, temp_sum);
83955+
83956+ memset(entry->pw, 0, GR_PW_LEN);
83957+
83958+ for (i = 0; i < GR_SHA_LEN; i++)
83959+ if (sum[i] != temp_sum[i])
83960+ retval = 1;
83961+ else
83962+ dummy = 1; // waste a cycle
83963+
83964+ crypto_free_hash(tfm);
83965+
83966+ return retval;
83967+}
83968diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
83969index 3cd9ccd..fe16d47 100644
83970--- a/include/acpi/acpi_bus.h
83971+++ b/include/acpi/acpi_bus.h
83972@@ -107,7 +107,7 @@ struct acpi_device_ops {
83973 acpi_op_bind bind;
83974 acpi_op_unbind unbind;
83975 acpi_op_notify notify;
83976-};
83977+} __no_const;
83978
83979 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
83980
83981diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
83982index f4906f6..71feb73 100644
83983--- a/include/acpi/acpi_drivers.h
83984+++ b/include/acpi/acpi_drivers.h
83985@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
83986 Dock Station
83987 -------------------------------------------------------------------------- */
83988 struct acpi_dock_ops {
83989- acpi_notify_handler handler;
83990- acpi_notify_handler uevent;
83991+ const acpi_notify_handler handler;
83992+ const acpi_notify_handler uevent;
83993 };
83994
83995 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
83996@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
83997 extern int register_dock_notifier(struct notifier_block *nb);
83998 extern void unregister_dock_notifier(struct notifier_block *nb);
83999 extern int register_hotplug_dock_device(acpi_handle handle,
84000- struct acpi_dock_ops *ops,
84001+ const struct acpi_dock_ops *ops,
84002 void *context);
84003 extern void unregister_hotplug_dock_device(acpi_handle handle);
84004 #else
84005@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
84006 {
84007 }
84008 static inline int register_hotplug_dock_device(acpi_handle handle,
84009- struct acpi_dock_ops *ops,
84010+ const struct acpi_dock_ops *ops,
84011 void *context)
84012 {
84013 return -ENODEV;
84014diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
84015index b7babf0..a9ac9fc 100644
84016--- a/include/asm-generic/atomic-long.h
84017+++ b/include/asm-generic/atomic-long.h
84018@@ -22,6 +22,12 @@
84019
84020 typedef atomic64_t atomic_long_t;
84021
84022+#ifdef CONFIG_PAX_REFCOUNT
84023+typedef atomic64_unchecked_t atomic_long_unchecked_t;
84024+#else
84025+typedef atomic64_t atomic_long_unchecked_t;
84026+#endif
84027+
84028 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
84029
84030 static inline long atomic_long_read(atomic_long_t *l)
84031@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
84032 return (long)atomic64_read(v);
84033 }
84034
84035+#ifdef CONFIG_PAX_REFCOUNT
84036+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
84037+{
84038+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84039+
84040+ return (long)atomic64_read_unchecked(v);
84041+}
84042+#endif
84043+
84044 static inline void atomic_long_set(atomic_long_t *l, long i)
84045 {
84046 atomic64_t *v = (atomic64_t *)l;
84047@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
84048 atomic64_set(v, i);
84049 }
84050
84051+#ifdef CONFIG_PAX_REFCOUNT
84052+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
84053+{
84054+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84055+
84056+ atomic64_set_unchecked(v, i);
84057+}
84058+#endif
84059+
84060 static inline void atomic_long_inc(atomic_long_t *l)
84061 {
84062 atomic64_t *v = (atomic64_t *)l;
84063@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
84064 atomic64_inc(v);
84065 }
84066
84067+#ifdef CONFIG_PAX_REFCOUNT
84068+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
84069+{
84070+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84071+
84072+ atomic64_inc_unchecked(v);
84073+}
84074+#endif
84075+
84076 static inline void atomic_long_dec(atomic_long_t *l)
84077 {
84078 atomic64_t *v = (atomic64_t *)l;
84079@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
84080 atomic64_dec(v);
84081 }
84082
84083+#ifdef CONFIG_PAX_REFCOUNT
84084+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
84085+{
84086+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84087+
84088+ atomic64_dec_unchecked(v);
84089+}
84090+#endif
84091+
84092 static inline void atomic_long_add(long i, atomic_long_t *l)
84093 {
84094 atomic64_t *v = (atomic64_t *)l;
84095@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84096 atomic64_add(i, v);
84097 }
84098
84099+#ifdef CONFIG_PAX_REFCOUNT
84100+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84101+{
84102+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84103+
84104+ atomic64_add_unchecked(i, v);
84105+}
84106+#endif
84107+
84108 static inline void atomic_long_sub(long i, atomic_long_t *l)
84109 {
84110 atomic64_t *v = (atomic64_t *)l;
84111@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84112 return (long)atomic64_inc_return(v);
84113 }
84114
84115+#ifdef CONFIG_PAX_REFCOUNT
84116+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84117+{
84118+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84119+
84120+ return (long)atomic64_inc_return_unchecked(v);
84121+}
84122+#endif
84123+
84124 static inline long atomic_long_dec_return(atomic_long_t *l)
84125 {
84126 atomic64_t *v = (atomic64_t *)l;
84127@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84128
84129 typedef atomic_t atomic_long_t;
84130
84131+#ifdef CONFIG_PAX_REFCOUNT
84132+typedef atomic_unchecked_t atomic_long_unchecked_t;
84133+#else
84134+typedef atomic_t atomic_long_unchecked_t;
84135+#endif
84136+
84137 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
84138 static inline long atomic_long_read(atomic_long_t *l)
84139 {
84140@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
84141 return (long)atomic_read(v);
84142 }
84143
84144+#ifdef CONFIG_PAX_REFCOUNT
84145+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
84146+{
84147+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84148+
84149+ return (long)atomic_read_unchecked(v);
84150+}
84151+#endif
84152+
84153 static inline void atomic_long_set(atomic_long_t *l, long i)
84154 {
84155 atomic_t *v = (atomic_t *)l;
84156@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
84157 atomic_set(v, i);
84158 }
84159
84160+#ifdef CONFIG_PAX_REFCOUNT
84161+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
84162+{
84163+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84164+
84165+ atomic_set_unchecked(v, i);
84166+}
84167+#endif
84168+
84169 static inline void atomic_long_inc(atomic_long_t *l)
84170 {
84171 atomic_t *v = (atomic_t *)l;
84172@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
84173 atomic_inc(v);
84174 }
84175
84176+#ifdef CONFIG_PAX_REFCOUNT
84177+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
84178+{
84179+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84180+
84181+ atomic_inc_unchecked(v);
84182+}
84183+#endif
84184+
84185 static inline void atomic_long_dec(atomic_long_t *l)
84186 {
84187 atomic_t *v = (atomic_t *)l;
84188@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
84189 atomic_dec(v);
84190 }
84191
84192+#ifdef CONFIG_PAX_REFCOUNT
84193+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
84194+{
84195+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84196+
84197+ atomic_dec_unchecked(v);
84198+}
84199+#endif
84200+
84201 static inline void atomic_long_add(long i, atomic_long_t *l)
84202 {
84203 atomic_t *v = (atomic_t *)l;
84204@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84205 atomic_add(i, v);
84206 }
84207
84208+#ifdef CONFIG_PAX_REFCOUNT
84209+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84210+{
84211+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84212+
84213+ atomic_add_unchecked(i, v);
84214+}
84215+#endif
84216+
84217 static inline void atomic_long_sub(long i, atomic_long_t *l)
84218 {
84219 atomic_t *v = (atomic_t *)l;
84220@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84221 return (long)atomic_inc_return(v);
84222 }
84223
84224+#ifdef CONFIG_PAX_REFCOUNT
84225+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84226+{
84227+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84228+
84229+ return (long)atomic_inc_return_unchecked(v);
84230+}
84231+#endif
84232+
84233 static inline long atomic_long_dec_return(atomic_long_t *l)
84234 {
84235 atomic_t *v = (atomic_t *)l;
84236@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84237
84238 #endif /* BITS_PER_LONG == 64 */
84239
84240+#ifdef CONFIG_PAX_REFCOUNT
84241+static inline void pax_refcount_needs_these_functions(void)
84242+{
84243+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
84244+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
84245+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
84246+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
84247+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
84248+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
84249+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
84250+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
84251+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
84252+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
84253+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
84254+
84255+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
84256+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
84257+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
84258+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
84259+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
84260+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
84261+}
84262+#else
84263+#define atomic_read_unchecked(v) atomic_read(v)
84264+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
84265+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
84266+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
84267+#define atomic_inc_unchecked(v) atomic_inc(v)
84268+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
84269+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
84270+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
84271+#define atomic_dec_unchecked(v) atomic_dec(v)
84272+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
84273+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
84274+
84275+#define atomic_long_read_unchecked(v) atomic_long_read(v)
84276+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
84277+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
84278+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
84279+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
84280+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
84281+#endif
84282+
84283 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
84284diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
84285index b18ce4f..2ee2843 100644
84286--- a/include/asm-generic/atomic64.h
84287+++ b/include/asm-generic/atomic64.h
84288@@ -16,6 +16,8 @@ typedef struct {
84289 long long counter;
84290 } atomic64_t;
84291
84292+typedef atomic64_t atomic64_unchecked_t;
84293+
84294 #define ATOMIC64_INIT(i) { (i) }
84295
84296 extern long long atomic64_read(const atomic64_t *v);
84297@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
84298 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
84299 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
84300
84301+#define atomic64_read_unchecked(v) atomic64_read(v)
84302+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
84303+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
84304+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
84305+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
84306+#define atomic64_inc_unchecked(v) atomic64_inc(v)
84307+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
84308+#define atomic64_dec_unchecked(v) atomic64_dec(v)
84309+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
84310+
84311 #endif /* _ASM_GENERIC_ATOMIC64_H */
84312diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
84313index d48ddf0..656a0ac 100644
84314--- a/include/asm-generic/bug.h
84315+++ b/include/asm-generic/bug.h
84316@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
84317
84318 #else /* !CONFIG_BUG */
84319 #ifndef HAVE_ARCH_BUG
84320-#define BUG() do {} while(0)
84321+#define BUG() do { for (;;) ; } while(0)
84322 #endif
84323
84324 #ifndef HAVE_ARCH_BUG_ON
84325-#define BUG_ON(condition) do { if (condition) ; } while(0)
84326+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
84327 #endif
84328
84329 #ifndef HAVE_ARCH_WARN_ON
84330diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
84331index 1bfcfe5..e04c5c9 100644
84332--- a/include/asm-generic/cache.h
84333+++ b/include/asm-generic/cache.h
84334@@ -6,7 +6,7 @@
84335 * cache lines need to provide their own cache.h.
84336 */
84337
84338-#define L1_CACHE_SHIFT 5
84339-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
84340+#define L1_CACHE_SHIFT 5UL
84341+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
84342
84343 #endif /* __ASM_GENERIC_CACHE_H */
84344diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
84345index 6920695..41038bc 100644
84346--- a/include/asm-generic/dma-mapping-common.h
84347+++ b/include/asm-generic/dma-mapping-common.h
84348@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
84349 enum dma_data_direction dir,
84350 struct dma_attrs *attrs)
84351 {
84352- struct dma_map_ops *ops = get_dma_ops(dev);
84353+ const struct dma_map_ops *ops = get_dma_ops(dev);
84354 dma_addr_t addr;
84355
84356 kmemcheck_mark_initialized(ptr, size);
84357@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
84358 enum dma_data_direction dir,
84359 struct dma_attrs *attrs)
84360 {
84361- struct dma_map_ops *ops = get_dma_ops(dev);
84362+ const struct dma_map_ops *ops = get_dma_ops(dev);
84363
84364 BUG_ON(!valid_dma_direction(dir));
84365 if (ops->unmap_page)
84366@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
84367 int nents, enum dma_data_direction dir,
84368 struct dma_attrs *attrs)
84369 {
84370- struct dma_map_ops *ops = get_dma_ops(dev);
84371+ const struct dma_map_ops *ops = get_dma_ops(dev);
84372 int i, ents;
84373 struct scatterlist *s;
84374
84375@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
84376 int nents, enum dma_data_direction dir,
84377 struct dma_attrs *attrs)
84378 {
84379- struct dma_map_ops *ops = get_dma_ops(dev);
84380+ const struct dma_map_ops *ops = get_dma_ops(dev);
84381
84382 BUG_ON(!valid_dma_direction(dir));
84383 debug_dma_unmap_sg(dev, sg, nents, dir);
84384@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84385 size_t offset, size_t size,
84386 enum dma_data_direction dir)
84387 {
84388- struct dma_map_ops *ops = get_dma_ops(dev);
84389+ const struct dma_map_ops *ops = get_dma_ops(dev);
84390 dma_addr_t addr;
84391
84392 kmemcheck_mark_initialized(page_address(page) + offset, size);
84393@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84394 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
84395 size_t size, enum dma_data_direction dir)
84396 {
84397- struct dma_map_ops *ops = get_dma_ops(dev);
84398+ const struct dma_map_ops *ops = get_dma_ops(dev);
84399
84400 BUG_ON(!valid_dma_direction(dir));
84401 if (ops->unmap_page)
84402@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
84403 size_t size,
84404 enum dma_data_direction dir)
84405 {
84406- struct dma_map_ops *ops = get_dma_ops(dev);
84407+ const struct dma_map_ops *ops = get_dma_ops(dev);
84408
84409 BUG_ON(!valid_dma_direction(dir));
84410 if (ops->sync_single_for_cpu)
84411@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
84412 dma_addr_t addr, size_t size,
84413 enum dma_data_direction dir)
84414 {
84415- struct dma_map_ops *ops = get_dma_ops(dev);
84416+ const struct dma_map_ops *ops = get_dma_ops(dev);
84417
84418 BUG_ON(!valid_dma_direction(dir));
84419 if (ops->sync_single_for_device)
84420@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
84421 size_t size,
84422 enum dma_data_direction dir)
84423 {
84424- struct dma_map_ops *ops = get_dma_ops(dev);
84425+ const struct dma_map_ops *ops = get_dma_ops(dev);
84426
84427 BUG_ON(!valid_dma_direction(dir));
84428 if (ops->sync_single_range_for_cpu) {
84429@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
84430 size_t size,
84431 enum dma_data_direction dir)
84432 {
84433- struct dma_map_ops *ops = get_dma_ops(dev);
84434+ const struct dma_map_ops *ops = get_dma_ops(dev);
84435
84436 BUG_ON(!valid_dma_direction(dir));
84437 if (ops->sync_single_range_for_device) {
84438@@ -155,7 +155,7 @@ static inline void
84439 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
84440 int nelems, enum dma_data_direction dir)
84441 {
84442- struct dma_map_ops *ops = get_dma_ops(dev);
84443+ const struct dma_map_ops *ops = get_dma_ops(dev);
84444
84445 BUG_ON(!valid_dma_direction(dir));
84446 if (ops->sync_sg_for_cpu)
84447@@ -167,7 +167,7 @@ static inline void
84448 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
84449 int nelems, enum dma_data_direction dir)
84450 {
84451- struct dma_map_ops *ops = get_dma_ops(dev);
84452+ const struct dma_map_ops *ops = get_dma_ops(dev);
84453
84454 BUG_ON(!valid_dma_direction(dir));
84455 if (ops->sync_sg_for_device)
84456diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
84457index 0d68a1e..b74a761 100644
84458--- a/include/asm-generic/emergency-restart.h
84459+++ b/include/asm-generic/emergency-restart.h
84460@@ -1,7 +1,7 @@
84461 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
84462 #define _ASM_GENERIC_EMERGENCY_RESTART_H
84463
84464-static inline void machine_emergency_restart(void)
84465+static inline __noreturn void machine_emergency_restart(void)
84466 {
84467 machine_restart(NULL);
84468 }
84469diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
84470index 3c2344f..4590a7d 100644
84471--- a/include/asm-generic/futex.h
84472+++ b/include/asm-generic/futex.h
84473@@ -6,7 +6,7 @@
84474 #include <asm/errno.h>
84475
84476 static inline int
84477-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84478+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
84479 {
84480 int op = (encoded_op >> 28) & 7;
84481 int cmp = (encoded_op >> 24) & 15;
84482@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84483 }
84484
84485 static inline int
84486-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
84487+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
84488 {
84489 return -ENOSYS;
84490 }
84491diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
84492index e5f234a..cdb16b3 100644
84493--- a/include/asm-generic/kmap_types.h
84494+++ b/include/asm-generic/kmap_types.h
84495@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
84496 KMAP_D(16) KM_IRQ_PTE,
84497 KMAP_D(17) KM_NMI,
84498 KMAP_D(18) KM_NMI_PTE,
84499-KMAP_D(19) KM_TYPE_NR
84500+KMAP_D(19) KM_CLEARPAGE,
84501+KMAP_D(20) KM_TYPE_NR
84502 };
84503
84504 #undef KMAP_D
84505diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
84506index fc21844..2ee9629 100644
84507--- a/include/asm-generic/local.h
84508+++ b/include/asm-generic/local.h
84509@@ -39,6 +39,7 @@ typedef struct
84510 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
84511 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
84512 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
84513+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
84514
84515 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
84516 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
84517diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
84518index 725612b..9cc513a 100644
84519--- a/include/asm-generic/pgtable-nopmd.h
84520+++ b/include/asm-generic/pgtable-nopmd.h
84521@@ -1,14 +1,19 @@
84522 #ifndef _PGTABLE_NOPMD_H
84523 #define _PGTABLE_NOPMD_H
84524
84525-#ifndef __ASSEMBLY__
84526-
84527 #include <asm-generic/pgtable-nopud.h>
84528
84529-struct mm_struct;
84530-
84531 #define __PAGETABLE_PMD_FOLDED
84532
84533+#define PMD_SHIFT PUD_SHIFT
84534+#define PTRS_PER_PMD 1
84535+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
84536+#define PMD_MASK (~(PMD_SIZE-1))
84537+
84538+#ifndef __ASSEMBLY__
84539+
84540+struct mm_struct;
84541+
84542 /*
84543 * Having the pmd type consist of a pud gets the size right, and allows
84544 * us to conceptually access the pud entry that this pmd is folded into
84545@@ -16,11 +21,6 @@ struct mm_struct;
84546 */
84547 typedef struct { pud_t pud; } pmd_t;
84548
84549-#define PMD_SHIFT PUD_SHIFT
84550-#define PTRS_PER_PMD 1
84551-#define PMD_SIZE (1UL << PMD_SHIFT)
84552-#define PMD_MASK (~(PMD_SIZE-1))
84553-
84554 /*
84555 * The "pud_xxx()" functions here are trivial for a folded two-level
84556 * setup: the pmd is never bad, and a pmd always exists (as it's folded
84557diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
84558index 810431d..ccc3638 100644
84559--- a/include/asm-generic/pgtable-nopud.h
84560+++ b/include/asm-generic/pgtable-nopud.h
84561@@ -1,10 +1,15 @@
84562 #ifndef _PGTABLE_NOPUD_H
84563 #define _PGTABLE_NOPUD_H
84564
84565-#ifndef __ASSEMBLY__
84566-
84567 #define __PAGETABLE_PUD_FOLDED
84568
84569+#define PUD_SHIFT PGDIR_SHIFT
84570+#define PTRS_PER_PUD 1
84571+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
84572+#define PUD_MASK (~(PUD_SIZE-1))
84573+
84574+#ifndef __ASSEMBLY__
84575+
84576 /*
84577 * Having the pud type consist of a pgd gets the size right, and allows
84578 * us to conceptually access the pgd entry that this pud is folded into
84579@@ -12,11 +17,6 @@
84580 */
84581 typedef struct { pgd_t pgd; } pud_t;
84582
84583-#define PUD_SHIFT PGDIR_SHIFT
84584-#define PTRS_PER_PUD 1
84585-#define PUD_SIZE (1UL << PUD_SHIFT)
84586-#define PUD_MASK (~(PUD_SIZE-1))
84587-
84588 /*
84589 * The "pgd_xxx()" functions here are trivial for a folded two-level
84590 * setup: the pud is never bad, and a pud always exists (as it's folded
84591diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
84592index e2bd73e..fea8ed3 100644
84593--- a/include/asm-generic/pgtable.h
84594+++ b/include/asm-generic/pgtable.h
84595@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
84596 unsigned long size);
84597 #endif
84598
84599+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
84600+static inline unsigned long pax_open_kernel(void) { return 0; }
84601+#endif
84602+
84603+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
84604+static inline unsigned long pax_close_kernel(void) { return 0; }
84605+#endif
84606+
84607 #endif /* !__ASSEMBLY__ */
84608
84609 #endif /* _ASM_GENERIC_PGTABLE_H */
84610diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
84611index b218b85..f0ac13a 100644
84612--- a/include/asm-generic/uaccess.h
84613+++ b/include/asm-generic/uaccess.h
84614@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
84615 */
84616 #ifndef __copy_from_user
84617 static inline __must_check long __copy_from_user(void *to,
84618+ const void __user * from, unsigned long n) __size_overflow(3);
84619+static inline __must_check long __copy_from_user(void *to,
84620 const void __user * from, unsigned long n)
84621 {
84622 if (__builtin_constant_p(n)) {
84623@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
84624
84625 #ifndef __copy_to_user
84626 static inline __must_check long __copy_to_user(void __user *to,
84627+ const void *from, unsigned long n) __size_overflow(3);
84628+static inline __must_check long __copy_to_user(void __user *to,
84629 const void *from, unsigned long n)
84630 {
84631 if (__builtin_constant_p(n)) {
84632@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
84633 -EFAULT; \
84634 })
84635
84636+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
84637 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
84638 {
84639 size = __copy_from_user(x, ptr, size);
84640@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
84641 #define __copy_to_user_inatomic __copy_to_user
84642 #endif
84643
84644+static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
84645 static inline long copy_from_user(void *to,
84646 const void __user * from, unsigned long n)
84647 {
84648@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
84649 return n;
84650 }
84651
84652+static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
84653 static inline long copy_to_user(void __user *to,
84654 const void *from, unsigned long n)
84655 {
84656@@ -265,6 +272,8 @@ static inline long copy_to_user(void __user *to,
84657 */
84658 #ifndef __strncpy_from_user
84659 static inline long
84660+__strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
84661+static inline long
84662 __strncpy_from_user(char *dst, const char __user *src, long count)
84663 {
84664 char *tmp;
84665@@ -276,6 +285,8 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
84666 #endif
84667
84668 static inline long
84669+strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
84670+static inline long
84671 strncpy_from_user(char *dst, const char __user *src, long count)
84672 {
84673 if (!access_ok(VERIFY_READ, src, 1))
84674@@ -289,6 +300,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
84675 * Return 0 on exception, a value greater than N if too long
84676 */
84677 #ifndef strnlen_user
84678+static inline long strnlen_user(const char __user *src, unsigned long n) __size_overflow(2);
84679 static inline long strnlen_user(const char __user *src, long n)
84680 {
84681 if (!access_ok(VERIFY_READ, src, 1))
84682@@ -307,6 +319,8 @@ static inline long strlen_user(const char __user *src)
84683 */
84684 #ifndef __clear_user
84685 static inline __must_check unsigned long
84686+__clear_user(void __user *to, unsigned long n) __size_overflow(2);
84687+static inline __must_check unsigned long
84688 __clear_user(void __user *to, unsigned long n)
84689 {
84690 memset((void __force *)to, 0, n);
84691@@ -315,6 +329,8 @@ __clear_user(void __user *to, unsigned long n)
84692 #endif
84693
84694 static inline __must_check unsigned long
84695+clear_user(void __user *to, unsigned long n) __size_overflow(2);
84696+static inline __must_check unsigned long
84697 clear_user(void __user *to, unsigned long n)
84698 {
84699 might_sleep();
84700diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
84701index b6e818f..21aa58a 100644
84702--- a/include/asm-generic/vmlinux.lds.h
84703+++ b/include/asm-generic/vmlinux.lds.h
84704@@ -199,6 +199,7 @@
84705 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
84706 VMLINUX_SYMBOL(__start_rodata) = .; \
84707 *(.rodata) *(.rodata.*) \
84708+ *(.data.read_only) \
84709 *(__vermagic) /* Kernel version magic */ \
84710 *(__markers_strings) /* Markers: strings */ \
84711 *(__tracepoints_strings)/* Tracepoints: strings */ \
84712@@ -656,22 +657,24 @@
84713 * section in the linker script will go there too. @phdr should have
84714 * a leading colon.
84715 *
84716- * Note that this macros defines __per_cpu_load as an absolute symbol.
84717+ * Note that this macros defines per_cpu_load as an absolute symbol.
84718 * If there is no need to put the percpu section at a predetermined
84719 * address, use PERCPU().
84720 */
84721 #define PERCPU_VADDR(vaddr, phdr) \
84722- VMLINUX_SYMBOL(__per_cpu_load) = .; \
84723- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
84724+ per_cpu_load = .; \
84725+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
84726 - LOAD_OFFSET) { \
84727+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
84728 VMLINUX_SYMBOL(__per_cpu_start) = .; \
84729 *(.data.percpu.first) \
84730- *(.data.percpu.page_aligned) \
84731 *(.data.percpu) \
84732+ . = ALIGN(PAGE_SIZE); \
84733+ *(.data.percpu.page_aligned) \
84734 *(.data.percpu.shared_aligned) \
84735 VMLINUX_SYMBOL(__per_cpu_end) = .; \
84736 } phdr \
84737- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
84738+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
84739
84740 /**
84741 * PERCPU - define output section for percpu area, simple version
84742diff --git a/include/drm/drmP.h b/include/drm/drmP.h
84743index ebab6a6..351dba1 100644
84744--- a/include/drm/drmP.h
84745+++ b/include/drm/drmP.h
84746@@ -71,6 +71,7 @@
84747 #include <linux/workqueue.h>
84748 #include <linux/poll.h>
84749 #include <asm/pgalloc.h>
84750+#include <asm/local.h>
84751 #include "drm.h"
84752
84753 #include <linux/idr.h>
84754@@ -814,7 +815,7 @@ struct drm_driver {
84755 void (*vgaarb_irq)(struct drm_device *dev, bool state);
84756
84757 /* Driver private ops for this object */
84758- struct vm_operations_struct *gem_vm_ops;
84759+ const struct vm_operations_struct *gem_vm_ops;
84760
84761 int major;
84762 int minor;
84763@@ -917,7 +918,7 @@ struct drm_device {
84764
84765 /** \name Usage Counters */
84766 /*@{ */
84767- int open_count; /**< Outstanding files open */
84768+ local_t open_count; /**< Outstanding files open */
84769 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
84770 atomic_t vma_count; /**< Outstanding vma areas open */
84771 int buf_use; /**< Buffers in use -- cannot alloc */
84772@@ -928,7 +929,7 @@ struct drm_device {
84773 /*@{ */
84774 unsigned long counters;
84775 enum drm_stat_type types[15];
84776- atomic_t counts[15];
84777+ atomic_unchecked_t counts[15];
84778 /*@} */
84779
84780 struct list_head filelist;
84781@@ -1016,7 +1017,7 @@ struct drm_device {
84782 struct pci_controller *hose;
84783 #endif
84784 struct drm_sg_mem *sg; /**< Scatter gather memory */
84785- unsigned int num_crtcs; /**< Number of CRTCs on this device */
84786+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
84787 void *dev_private; /**< device private data */
84788 void *mm_private;
84789 struct address_space *dev_mapping;
84790@@ -1042,11 +1043,11 @@ struct drm_device {
84791 spinlock_t object_name_lock;
84792 struct idr object_name_idr;
84793 atomic_t object_count;
84794- atomic_t object_memory;
84795+ atomic_unchecked_t object_memory;
84796 atomic_t pin_count;
84797- atomic_t pin_memory;
84798+ atomic_unchecked_t pin_memory;
84799 atomic_t gtt_count;
84800- atomic_t gtt_memory;
84801+ atomic_unchecked_t gtt_memory;
84802 uint32_t gtt_total;
84803 uint32_t invalidate_domains; /* domains pending invalidation */
84804 uint32_t flush_domains; /* domains pending flush */
84805diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
84806index b29e201..3413cc9 100644
84807--- a/include/drm/drm_crtc_helper.h
84808+++ b/include/drm/drm_crtc_helper.h
84809@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
84810
84811 /* reload the current crtc LUT */
84812 void (*load_lut)(struct drm_crtc *crtc);
84813-};
84814+} __no_const;
84815
84816 struct drm_encoder_helper_funcs {
84817 void (*dpms)(struct drm_encoder *encoder, int mode);
84818@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
84819 struct drm_connector *connector);
84820 /* disable encoder when not in use - more explicit than dpms off */
84821 void (*disable)(struct drm_encoder *encoder);
84822-};
84823+} __no_const;
84824
84825 struct drm_connector_helper_funcs {
84826 int (*get_modes)(struct drm_connector *connector);
84827diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
84828index b199170..6f9e64c 100644
84829--- a/include/drm/ttm/ttm_memory.h
84830+++ b/include/drm/ttm/ttm_memory.h
84831@@ -47,7 +47,7 @@
84832
84833 struct ttm_mem_shrink {
84834 int (*do_shrink) (struct ttm_mem_shrink *);
84835-};
84836+} __no_const;
84837
84838 /**
84839 * struct ttm_mem_global - Global memory accounting structure.
84840diff --git a/include/linux/a.out.h b/include/linux/a.out.h
84841index e86dfca..40cc55f 100644
84842--- a/include/linux/a.out.h
84843+++ b/include/linux/a.out.h
84844@@ -39,6 +39,14 @@ enum machine_type {
84845 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
84846 };
84847
84848+/* Constants for the N_FLAGS field */
84849+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84850+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
84851+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
84852+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
84853+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84854+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84855+
84856 #if !defined (N_MAGIC)
84857 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
84858 #endif
84859diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
84860index 817b237..62c10bc 100644
84861--- a/include/linux/atmdev.h
84862+++ b/include/linux/atmdev.h
84863@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
84864 #endif
84865
84866 struct k_atm_aal_stats {
84867-#define __HANDLE_ITEM(i) atomic_t i
84868+#define __HANDLE_ITEM(i) atomic_unchecked_t i
84869 __AAL_STAT_ITEMS
84870 #undef __HANDLE_ITEM
84871 };
84872diff --git a/include/linux/backlight.h b/include/linux/backlight.h
84873index 0f5f578..8c4f884 100644
84874--- a/include/linux/backlight.h
84875+++ b/include/linux/backlight.h
84876@@ -36,18 +36,18 @@ struct backlight_device;
84877 struct fb_info;
84878
84879 struct backlight_ops {
84880- unsigned int options;
84881+ const unsigned int options;
84882
84883 #define BL_CORE_SUSPENDRESUME (1 << 0)
84884
84885 /* Notify the backlight driver some property has changed */
84886- int (*update_status)(struct backlight_device *);
84887+ int (* const update_status)(struct backlight_device *);
84888 /* Return the current backlight brightness (accounting for power,
84889 fb_blank etc.) */
84890- int (*get_brightness)(struct backlight_device *);
84891+ int (* const get_brightness)(struct backlight_device *);
84892 /* Check if given framebuffer device is the one bound to this backlight;
84893 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
84894- int (*check_fb)(struct fb_info *);
84895+ int (* const check_fb)(struct fb_info *);
84896 };
84897
84898 /* This structure defines all the properties of a backlight */
84899@@ -86,7 +86,7 @@ struct backlight_device {
84900 registered this device has been unloaded, and if class_get_devdata()
84901 points to something in the body of that driver, it is also invalid. */
84902 struct mutex ops_lock;
84903- struct backlight_ops *ops;
84904+ const struct backlight_ops *ops;
84905
84906 /* The framebuffer notifier block */
84907 struct notifier_block fb_notif;
84908@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
84909 }
84910
84911 extern struct backlight_device *backlight_device_register(const char *name,
84912- struct device *dev, void *devdata, struct backlight_ops *ops);
84913+ struct device *dev, void *devdata, const struct backlight_ops *ops);
84914 extern void backlight_device_unregister(struct backlight_device *bd);
84915 extern void backlight_force_update(struct backlight_device *bd,
84916 enum backlight_update_reason reason);
84917diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
84918index a3d802e..93a2ef4 100644
84919--- a/include/linux/binfmts.h
84920+++ b/include/linux/binfmts.h
84921@@ -18,7 +18,7 @@ struct pt_regs;
84922 #define BINPRM_BUF_SIZE 128
84923
84924 #ifdef __KERNEL__
84925-#include <linux/list.h>
84926+#include <linux/sched.h>
84927
84928 #define CORENAME_MAX_SIZE 128
84929
84930@@ -58,6 +58,7 @@ struct linux_binprm{
84931 unsigned interp_flags;
84932 unsigned interp_data;
84933 unsigned long loader, exec;
84934+ char tcomm[TASK_COMM_LEN];
84935 };
84936
84937 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
84938@@ -83,6 +84,7 @@ struct linux_binfmt {
84939 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
84940 int (*load_shlib)(struct file *);
84941 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
84942+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
84943 unsigned long min_coredump; /* minimal dump size */
84944 int hasvdso;
84945 };
84946diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
84947index 5eb6cb0..a2906d2 100644
84948--- a/include/linux/blkdev.h
84949+++ b/include/linux/blkdev.h
84950@@ -1281,7 +1281,7 @@ struct block_device_operations {
84951 int (*revalidate_disk) (struct gendisk *);
84952 int (*getgeo)(struct block_device *, struct hd_geometry *);
84953 struct module *owner;
84954-};
84955+} __do_const;
84956
84957 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
84958 unsigned long);
84959diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
84960index 3b73b99..629d21b 100644
84961--- a/include/linux/blktrace_api.h
84962+++ b/include/linux/blktrace_api.h
84963@@ -160,7 +160,7 @@ struct blk_trace {
84964 struct dentry *dir;
84965 struct dentry *dropped_file;
84966 struct dentry *msg_file;
84967- atomic_t dropped;
84968+ atomic_unchecked_t dropped;
84969 };
84970
84971 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
84972diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
84973index 83195fb..0b0f77d 100644
84974--- a/include/linux/byteorder/little_endian.h
84975+++ b/include/linux/byteorder/little_endian.h
84976@@ -42,51 +42,51 @@
84977
84978 static inline __le64 __cpu_to_le64p(const __u64 *p)
84979 {
84980- return (__force __le64)*p;
84981+ return (__force const __le64)*p;
84982 }
84983 static inline __u64 __le64_to_cpup(const __le64 *p)
84984 {
84985- return (__force __u64)*p;
84986+ return (__force const __u64)*p;
84987 }
84988 static inline __le32 __cpu_to_le32p(const __u32 *p)
84989 {
84990- return (__force __le32)*p;
84991+ return (__force const __le32)*p;
84992 }
84993 static inline __u32 __le32_to_cpup(const __le32 *p)
84994 {
84995- return (__force __u32)*p;
84996+ return (__force const __u32)*p;
84997 }
84998 static inline __le16 __cpu_to_le16p(const __u16 *p)
84999 {
85000- return (__force __le16)*p;
85001+ return (__force const __le16)*p;
85002 }
85003 static inline __u16 __le16_to_cpup(const __le16 *p)
85004 {
85005- return (__force __u16)*p;
85006+ return (__force const __u16)*p;
85007 }
85008 static inline __be64 __cpu_to_be64p(const __u64 *p)
85009 {
85010- return (__force __be64)__swab64p(p);
85011+ return (__force const __be64)__swab64p(p);
85012 }
85013 static inline __u64 __be64_to_cpup(const __be64 *p)
85014 {
85015- return __swab64p((__u64 *)p);
85016+ return __swab64p((const __u64 *)p);
85017 }
85018 static inline __be32 __cpu_to_be32p(const __u32 *p)
85019 {
85020- return (__force __be32)__swab32p(p);
85021+ return (__force const __be32)__swab32p(p);
85022 }
85023 static inline __u32 __be32_to_cpup(const __be32 *p)
85024 {
85025- return __swab32p((__u32 *)p);
85026+ return __swab32p((const __u32 *)p);
85027 }
85028 static inline __be16 __cpu_to_be16p(const __u16 *p)
85029 {
85030- return (__force __be16)__swab16p(p);
85031+ return (__force const __be16)__swab16p(p);
85032 }
85033 static inline __u16 __be16_to_cpup(const __be16 *p)
85034 {
85035- return __swab16p((__u16 *)p);
85036+ return __swab16p((const __u16 *)p);
85037 }
85038 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
85039 #define __le64_to_cpus(x) do { (void)(x); } while (0)
85040diff --git a/include/linux/cache.h b/include/linux/cache.h
85041index 97e2488..e7576b9 100644
85042--- a/include/linux/cache.h
85043+++ b/include/linux/cache.h
85044@@ -16,6 +16,10 @@
85045 #define __read_mostly
85046 #endif
85047
85048+#ifndef __read_only
85049+#define __read_only __read_mostly
85050+#endif
85051+
85052 #ifndef ____cacheline_aligned
85053 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
85054 #endif
85055diff --git a/include/linux/capability.h b/include/linux/capability.h
85056index c8f2a5f7..1618a5c 100644
85057--- a/include/linux/capability.h
85058+++ b/include/linux/capability.h
85059@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
85060 (security_real_capable_noaudit((t), (cap)) == 0)
85061
85062 extern int capable(int cap);
85063+int capable_nolog(int cap);
85064
85065 /* audit system wants to get cap info from files as well */
85066 struct dentry;
85067diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
85068index 450fa59..246fa19 100644
85069--- a/include/linux/compiler-gcc4.h
85070+++ b/include/linux/compiler-gcc4.h
85071@@ -14,6 +14,9 @@
85072 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
85073 #define __always_inline inline __attribute__((always_inline))
85074
85075+#ifdef SIZE_OVERFLOW_PLUGIN
85076+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
85077+#endif
85078 /*
85079 * A trick to suppress uninitialized variable warning without generating any
85080 * code
85081@@ -36,4 +39,16 @@
85082 the kernel context */
85083 #define __cold __attribute__((__cold__))
85084
85085+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
85086+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
85087+#define __bos0(ptr) __bos((ptr), 0)
85088+#define __bos1(ptr) __bos((ptr), 1)
85089+
85090+#if __GNUC_MINOR__ >= 5
85091+#ifdef CONSTIFY_PLUGIN
85092+#define __no_const __attribute__((no_const))
85093+#define __do_const __attribute__((do_const))
85094+#endif
85095+#endif
85096+
85097 #endif
85098diff --git a/include/linux/compiler.h b/include/linux/compiler.h
85099index 04fb513..6189f3b 100644
85100--- a/include/linux/compiler.h
85101+++ b/include/linux/compiler.h
85102@@ -5,11 +5,14 @@
85103
85104 #ifdef __CHECKER__
85105 # define __user __attribute__((noderef, address_space(1)))
85106+# define __force_user __force __user
85107 # define __kernel /* default address space */
85108+# define __force_kernel __force __kernel
85109 # define __safe __attribute__((safe))
85110 # define __force __attribute__((force))
85111 # define __nocast __attribute__((nocast))
85112 # define __iomem __attribute__((noderef, address_space(2)))
85113+# define __force_iomem __force __iomem
85114 # define __acquires(x) __attribute__((context(x,0,1)))
85115 # define __releases(x) __attribute__((context(x,1,0)))
85116 # define __acquire(x) __context__(x,1)
85117@@ -17,13 +20,34 @@
85118 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
85119 extern void __chk_user_ptr(const volatile void __user *);
85120 extern void __chk_io_ptr(const volatile void __iomem *);
85121+#elif defined(CHECKER_PLUGIN)
85122+//# define __user
85123+//# define __force_user
85124+//# define __kernel
85125+//# define __force_kernel
85126+# define __safe
85127+# define __force
85128+# define __nocast
85129+# define __iomem
85130+# define __force_iomem
85131+# define __chk_user_ptr(x) (void)0
85132+# define __chk_io_ptr(x) (void)0
85133+# define __builtin_warning(x, y...) (1)
85134+# define __acquires(x)
85135+# define __releases(x)
85136+# define __acquire(x) (void)0
85137+# define __release(x) (void)0
85138+# define __cond_lock(x,c) (c)
85139 #else
85140 # define __user
85141+# define __force_user
85142 # define __kernel
85143+# define __force_kernel
85144 # define __safe
85145 # define __force
85146 # define __nocast
85147 # define __iomem
85148+# define __force_iomem
85149 # define __chk_user_ptr(x) (void)0
85150 # define __chk_io_ptr(x) (void)0
85151 # define __builtin_warning(x, y...) (1)
85152@@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85153 # define __attribute_const__ /* unimplemented */
85154 #endif
85155
85156+#ifndef __no_const
85157+# define __no_const
85158+#endif
85159+
85160+#ifndef __do_const
85161+# define __do_const
85162+#endif
85163+
85164+#ifndef __size_overflow
85165+# define __size_overflow(...)
85166+#endif
85167 /*
85168 * Tell gcc if a function is cold. The compiler will assume any path
85169 * directly leading to the call is unlikely.
85170@@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85171 #define __cold
85172 #endif
85173
85174+#ifndef __alloc_size
85175+#define __alloc_size(...)
85176+#endif
85177+
85178+#ifndef __bos
85179+#define __bos(ptr, arg)
85180+#endif
85181+
85182+#ifndef __bos0
85183+#define __bos0(ptr)
85184+#endif
85185+
85186+#ifndef __bos1
85187+#define __bos1(ptr)
85188+#endif
85189+
85190 /* Simple shorthand for a section definition */
85191 #ifndef __section
85192 # define __section(S) __attribute__ ((__section__(#S)))
85193@@ -278,6 +329,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85194 * use is to mediate communication between process-level code and irq/NMI
85195 * handlers, all running on the same CPU.
85196 */
85197-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
85198+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
85199+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
85200
85201 #endif /* __LINUX_COMPILER_H */
85202diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
85203index 0026f26..6c237c5 100644
85204--- a/include/linux/crash_dump.h
85205+++ b/include/linux/crash_dump.h
85206@@ -12,7 +12,7 @@
85207 extern unsigned long long elfcorehdr_addr;
85208
85209 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
85210- unsigned long, int);
85211+ unsigned long, int) __size_overflow(3);
85212
85213 /* Architecture code defines this if there are other possible ELF
85214 * machine types, e.g. on bi-arch capable hardware. */
85215diff --git a/include/linux/crypto.h b/include/linux/crypto.h
85216index fd92988..a3164bd 100644
85217--- a/include/linux/crypto.h
85218+++ b/include/linux/crypto.h
85219@@ -394,7 +394,7 @@ struct cipher_tfm {
85220 const u8 *key, unsigned int keylen);
85221 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85222 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85223-};
85224+} __no_const;
85225
85226 struct hash_tfm {
85227 int (*init)(struct hash_desc *desc);
85228@@ -415,13 +415,13 @@ struct compress_tfm {
85229 int (*cot_decompress)(struct crypto_tfm *tfm,
85230 const u8 *src, unsigned int slen,
85231 u8 *dst, unsigned int *dlen);
85232-};
85233+} __no_const;
85234
85235 struct rng_tfm {
85236 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
85237 unsigned int dlen);
85238 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
85239-};
85240+} __no_const;
85241
85242 #define crt_ablkcipher crt_u.ablkcipher
85243 #define crt_aead crt_u.aead
85244diff --git a/include/linux/dcache.h b/include/linux/dcache.h
85245index 30b93b2..cd7a8db 100644
85246--- a/include/linux/dcache.h
85247+++ b/include/linux/dcache.h
85248@@ -119,6 +119,8 @@ struct dentry {
85249 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
85250 };
85251
85252+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
85253+
85254 /*
85255 * dentry->d_lock spinlock nesting subclasses:
85256 *
85257diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
85258index 3e9bd6a..f4e1aa0 100644
85259--- a/include/linux/decompress/mm.h
85260+++ b/include/linux/decompress/mm.h
85261@@ -78,7 +78,7 @@ static void free(void *where)
85262 * warnings when not needed (indeed large_malloc / large_free are not
85263 * needed by inflate */
85264
85265-#define malloc(a) kmalloc(a, GFP_KERNEL)
85266+#define malloc(a) kmalloc((a), GFP_KERNEL)
85267 #define free(a) kfree(a)
85268
85269 #define large_malloc(a) vmalloc(a)
85270diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
85271index 91b7618..92a93d32 100644
85272--- a/include/linux/dma-mapping.h
85273+++ b/include/linux/dma-mapping.h
85274@@ -16,51 +16,51 @@ enum dma_data_direction {
85275 };
85276
85277 struct dma_map_ops {
85278- void* (*alloc_coherent)(struct device *dev, size_t size,
85279+ void* (* const alloc_coherent)(struct device *dev, size_t size,
85280 dma_addr_t *dma_handle, gfp_t gfp);
85281- void (*free_coherent)(struct device *dev, size_t size,
85282+ void (* const free_coherent)(struct device *dev, size_t size,
85283 void *vaddr, dma_addr_t dma_handle);
85284- dma_addr_t (*map_page)(struct device *dev, struct page *page,
85285+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
85286 unsigned long offset, size_t size,
85287 enum dma_data_direction dir,
85288 struct dma_attrs *attrs);
85289- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
85290+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
85291 size_t size, enum dma_data_direction dir,
85292 struct dma_attrs *attrs);
85293- int (*map_sg)(struct device *dev, struct scatterlist *sg,
85294+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
85295 int nents, enum dma_data_direction dir,
85296 struct dma_attrs *attrs);
85297- void (*unmap_sg)(struct device *dev,
85298+ void (* const unmap_sg)(struct device *dev,
85299 struct scatterlist *sg, int nents,
85300 enum dma_data_direction dir,
85301 struct dma_attrs *attrs);
85302- void (*sync_single_for_cpu)(struct device *dev,
85303+ void (* const sync_single_for_cpu)(struct device *dev,
85304 dma_addr_t dma_handle, size_t size,
85305 enum dma_data_direction dir);
85306- void (*sync_single_for_device)(struct device *dev,
85307+ void (* const sync_single_for_device)(struct device *dev,
85308 dma_addr_t dma_handle, size_t size,
85309 enum dma_data_direction dir);
85310- void (*sync_single_range_for_cpu)(struct device *dev,
85311+ void (* const sync_single_range_for_cpu)(struct device *dev,
85312 dma_addr_t dma_handle,
85313 unsigned long offset,
85314 size_t size,
85315 enum dma_data_direction dir);
85316- void (*sync_single_range_for_device)(struct device *dev,
85317+ void (* const sync_single_range_for_device)(struct device *dev,
85318 dma_addr_t dma_handle,
85319 unsigned long offset,
85320 size_t size,
85321 enum dma_data_direction dir);
85322- void (*sync_sg_for_cpu)(struct device *dev,
85323+ void (* const sync_sg_for_cpu)(struct device *dev,
85324 struct scatterlist *sg, int nents,
85325 enum dma_data_direction dir);
85326- void (*sync_sg_for_device)(struct device *dev,
85327+ void (* const sync_sg_for_device)(struct device *dev,
85328 struct scatterlist *sg, int nents,
85329 enum dma_data_direction dir);
85330- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
85331- int (*dma_supported)(struct device *dev, u64 mask);
85332+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
85333+ int (* const dma_supported)(struct device *dev, u64 mask);
85334 int (*set_dma_mask)(struct device *dev, u64 mask);
85335 int is_phys;
85336-};
85337+} __do_const;
85338
85339 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
85340
85341diff --git a/include/linux/dst.h b/include/linux/dst.h
85342index e26fed8..b976d9f 100644
85343--- a/include/linux/dst.h
85344+++ b/include/linux/dst.h
85345@@ -380,7 +380,7 @@ struct dst_node
85346 struct thread_pool *pool;
85347
85348 /* Transaction IDs live here */
85349- atomic_long_t gen;
85350+ atomic_long_unchecked_t gen;
85351
85352 /*
85353 * How frequently and how many times transaction
85354diff --git a/include/linux/elf.h b/include/linux/elf.h
85355index 90a4ed0..d652617 100644
85356--- a/include/linux/elf.h
85357+++ b/include/linux/elf.h
85358@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
85359 #define PT_GNU_EH_FRAME 0x6474e550
85360
85361 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
85362+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
85363+
85364+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
85365+
85366+/* Constants for the e_flags field */
85367+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
85368+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
85369+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
85370+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
85371+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
85372+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
85373
85374 /* These constants define the different elf file types */
85375 #define ET_NONE 0
85376@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
85377 #define DT_DEBUG 21
85378 #define DT_TEXTREL 22
85379 #define DT_JMPREL 23
85380+#define DT_FLAGS 30
85381+ #define DF_TEXTREL 0x00000004
85382 #define DT_ENCODING 32
85383 #define OLD_DT_LOOS 0x60000000
85384 #define DT_LOOS 0x6000000d
85385@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
85386 #define PF_W 0x2
85387 #define PF_X 0x1
85388
85389+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
85390+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
85391+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
85392+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
85393+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
85394+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
85395+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
85396+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
85397+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
85398+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
85399+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
85400+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
85401+
85402 typedef struct elf32_phdr{
85403 Elf32_Word p_type;
85404 Elf32_Off p_offset;
85405@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
85406 #define EI_OSABI 7
85407 #define EI_PAD 8
85408
85409+#define EI_PAX 14
85410+
85411 #define ELFMAG0 0x7f /* EI_MAG */
85412 #define ELFMAG1 'E'
85413 #define ELFMAG2 'L'
85414@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
85415 #define elf_phdr elf32_phdr
85416 #define elf_note elf32_note
85417 #define elf_addr_t Elf32_Off
85418+#define elf_dyn Elf32_Dyn
85419
85420 #else
85421
85422@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
85423 #define elf_phdr elf64_phdr
85424 #define elf_note elf64_note
85425 #define elf_addr_t Elf64_Off
85426+#define elf_dyn Elf64_Dyn
85427
85428 #endif
85429
85430diff --git a/include/linux/fs.h b/include/linux/fs.h
85431index 1b9a47a..6fe2934 100644
85432--- a/include/linux/fs.h
85433+++ b/include/linux/fs.h
85434@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
85435 unsigned long, unsigned long);
85436
85437 struct address_space_operations {
85438- int (*writepage)(struct page *page, struct writeback_control *wbc);
85439- int (*readpage)(struct file *, struct page *);
85440- void (*sync_page)(struct page *);
85441+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
85442+ int (* const readpage)(struct file *, struct page *);
85443+ void (* const sync_page)(struct page *);
85444
85445 /* Write back some dirty pages from this mapping. */
85446- int (*writepages)(struct address_space *, struct writeback_control *);
85447+ int (* const writepages)(struct address_space *, struct writeback_control *);
85448
85449 /* Set a page dirty. Return true if this dirtied it */
85450- int (*set_page_dirty)(struct page *page);
85451+ int (* const set_page_dirty)(struct page *page);
85452
85453- int (*readpages)(struct file *filp, struct address_space *mapping,
85454+ int (* const readpages)(struct file *filp, struct address_space *mapping,
85455 struct list_head *pages, unsigned nr_pages);
85456
85457- int (*write_begin)(struct file *, struct address_space *mapping,
85458+ int (* const write_begin)(struct file *, struct address_space *mapping,
85459 loff_t pos, unsigned len, unsigned flags,
85460 struct page **pagep, void **fsdata);
85461- int (*write_end)(struct file *, struct address_space *mapping,
85462+ int (* const write_end)(struct file *, struct address_space *mapping,
85463 loff_t pos, unsigned len, unsigned copied,
85464 struct page *page, void *fsdata);
85465
85466 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
85467- sector_t (*bmap)(struct address_space *, sector_t);
85468- void (*invalidatepage) (struct page *, unsigned long);
85469- int (*releasepage) (struct page *, gfp_t);
85470- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
85471+ sector_t (* const bmap)(struct address_space *, sector_t);
85472+ void (* const invalidatepage) (struct page *, unsigned long);
85473+ int (* const releasepage) (struct page *, gfp_t);
85474+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
85475 loff_t offset, unsigned long nr_segs);
85476- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
85477+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
85478 void **, unsigned long *);
85479 /* migrate the contents of a page to the specified target */
85480- int (*migratepage) (struct address_space *,
85481+ int (* const migratepage) (struct address_space *,
85482 struct page *, struct page *);
85483- int (*launder_page) (struct page *);
85484- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
85485+ int (* const launder_page) (struct page *);
85486+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
85487 unsigned long);
85488- int (*error_remove_page)(struct address_space *, struct page *);
85489+ int (* const error_remove_page)(struct address_space *, struct page *);
85490 };
85491
85492 /*
85493@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
85494 typedef struct files_struct *fl_owner_t;
85495
85496 struct file_lock_operations {
85497- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85498- void (*fl_release_private)(struct file_lock *);
85499+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85500+ void (* const fl_release_private)(struct file_lock *);
85501 };
85502
85503 struct lock_manager_operations {
85504- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
85505- void (*fl_notify)(struct file_lock *); /* unblock callback */
85506- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
85507- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85508- void (*fl_release_private)(struct file_lock *);
85509- void (*fl_break)(struct file_lock *);
85510- int (*fl_mylease)(struct file_lock *, struct file_lock *);
85511- int (*fl_change)(struct file_lock **, int);
85512+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
85513+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
85514+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
85515+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85516+ void (* const fl_release_private)(struct file_lock *);
85517+ void (* const fl_break)(struct file_lock *);
85518+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
85519+ int (* const fl_change)(struct file_lock **, int);
85520 };
85521
85522 struct lock_manager {
85523@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
85524 unsigned int fi_flags; /* Flags as passed from user */
85525 unsigned int fi_extents_mapped; /* Number of mapped extents */
85526 unsigned int fi_extents_max; /* Size of fiemap_extent array */
85527- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
85528+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
85529 * array */
85530 };
85531 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
85532@@ -1512,7 +1512,8 @@ struct file_operations {
85533 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
85534 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
85535 int (*setlease)(struct file *, long, struct file_lock **);
85536-};
85537+} __do_const;
85538+typedef struct file_operations __no_const file_operations_no_const;
85539
85540 struct inode_operations {
85541 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
85542@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
85543 unsigned long, loff_t *);
85544
85545 struct super_operations {
85546- struct inode *(*alloc_inode)(struct super_block *sb);
85547- void (*destroy_inode)(struct inode *);
85548+ struct inode *(* const alloc_inode)(struct super_block *sb);
85549+ void (* const destroy_inode)(struct inode *);
85550
85551- void (*dirty_inode) (struct inode *);
85552- int (*write_inode) (struct inode *, int);
85553- void (*drop_inode) (struct inode *);
85554- void (*delete_inode) (struct inode *);
85555- void (*put_super) (struct super_block *);
85556- void (*write_super) (struct super_block *);
85557- int (*sync_fs)(struct super_block *sb, int wait);
85558- int (*freeze_fs) (struct super_block *);
85559- int (*unfreeze_fs) (struct super_block *);
85560- int (*statfs) (struct dentry *, struct kstatfs *);
85561- int (*remount_fs) (struct super_block *, int *, char *);
85562- void (*clear_inode) (struct inode *);
85563- void (*umount_begin) (struct super_block *);
85564+ void (* const dirty_inode) (struct inode *);
85565+ int (* const write_inode) (struct inode *, int);
85566+ void (* const drop_inode) (struct inode *);
85567+ void (* const delete_inode) (struct inode *);
85568+ void (* const put_super) (struct super_block *);
85569+ void (* const write_super) (struct super_block *);
85570+ int (* const sync_fs)(struct super_block *sb, int wait);
85571+ int (* const freeze_fs) (struct super_block *);
85572+ int (* const unfreeze_fs) (struct super_block *);
85573+ int (* const statfs) (struct dentry *, struct kstatfs *);
85574+ int (* const remount_fs) (struct super_block *, int *, char *);
85575+ void (* const clear_inode) (struct inode *);
85576+ void (* const umount_begin) (struct super_block *);
85577
85578- int (*show_options)(struct seq_file *, struct vfsmount *);
85579- int (*show_stats)(struct seq_file *, struct vfsmount *);
85580+ int (* const show_options)(struct seq_file *, struct vfsmount *);
85581+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
85582 #ifdef CONFIG_QUOTA
85583- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
85584- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85585+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
85586+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85587 #endif
85588- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85589+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85590 };
85591
85592 /*
85593diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
85594index 78a05bf..2a7d3e1 100644
85595--- a/include/linux/fs_struct.h
85596+++ b/include/linux/fs_struct.h
85597@@ -4,7 +4,7 @@
85598 #include <linux/path.h>
85599
85600 struct fs_struct {
85601- int users;
85602+ atomic_t users;
85603 rwlock_t lock;
85604 int umask;
85605 int in_exec;
85606diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
85607index 7be0c6f..2f63a2b 100644
85608--- a/include/linux/fscache-cache.h
85609+++ b/include/linux/fscache-cache.h
85610@@ -116,7 +116,7 @@ struct fscache_operation {
85611 #endif
85612 };
85613
85614-extern atomic_t fscache_op_debug_id;
85615+extern atomic_unchecked_t fscache_op_debug_id;
85616 extern const struct slow_work_ops fscache_op_slow_work_ops;
85617
85618 extern void fscache_enqueue_operation(struct fscache_operation *);
85619@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
85620 fscache_operation_release_t release)
85621 {
85622 atomic_set(&op->usage, 1);
85623- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
85624+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
85625 op->release = release;
85626 INIT_LIST_HEAD(&op->pend_link);
85627 fscache_set_op_state(op, "Init");
85628diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
85629index 4d6f47b..00bcedb 100644
85630--- a/include/linux/fsnotify_backend.h
85631+++ b/include/linux/fsnotify_backend.h
85632@@ -86,6 +86,7 @@ struct fsnotify_ops {
85633 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
85634 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
85635 };
85636+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
85637
85638 /*
85639 * A group is a "thing" that wants to receive notification about filesystem
85640diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
85641index 4ec5e67..42f1eb9 100644
85642--- a/include/linux/ftrace_event.h
85643+++ b/include/linux/ftrace_event.h
85644@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
85645 int filter_type);
85646 extern int trace_define_common_fields(struct ftrace_event_call *call);
85647
85648-#define is_signed_type(type) (((type)(-1)) < 0)
85649+#define is_signed_type(type) (((type)(-1)) < (type)1)
85650
85651 int trace_set_clr_event(const char *system, const char *event, int set);
85652
85653diff --git a/include/linux/genhd.h b/include/linux/genhd.h
85654index 297df45..b6a74ff 100644
85655--- a/include/linux/genhd.h
85656+++ b/include/linux/genhd.h
85657@@ -161,7 +161,7 @@ struct gendisk {
85658
85659 struct timer_rand_state *random;
85660
85661- atomic_t sync_io; /* RAID */
85662+ atomic_unchecked_t sync_io; /* RAID */
85663 struct work_struct async_notify;
85664 #ifdef CONFIG_BLK_DEV_INTEGRITY
85665 struct blk_integrity *integrity;
85666diff --git a/include/linux/gracl.h b/include/linux/gracl.h
85667new file mode 100644
85668index 0000000..af663cf
85669--- /dev/null
85670+++ b/include/linux/gracl.h
85671@@ -0,0 +1,319 @@
85672+#ifndef GR_ACL_H
85673+#define GR_ACL_H
85674+
85675+#include <linux/grdefs.h>
85676+#include <linux/resource.h>
85677+#include <linux/capability.h>
85678+#include <linux/dcache.h>
85679+#include <asm/resource.h>
85680+
85681+/* Major status information */
85682+
85683+#define GR_VERSION "grsecurity 2.9"
85684+#define GRSECURITY_VERSION 0x2900
85685+
85686+enum {
85687+ GR_SHUTDOWN = 0,
85688+ GR_ENABLE = 1,
85689+ GR_SPROLE = 2,
85690+ GR_RELOAD = 3,
85691+ GR_SEGVMOD = 4,
85692+ GR_STATUS = 5,
85693+ GR_UNSPROLE = 6,
85694+ GR_PASSSET = 7,
85695+ GR_SPROLEPAM = 8,
85696+};
85697+
85698+/* Password setup definitions
85699+ * kernel/grhash.c */
85700+enum {
85701+ GR_PW_LEN = 128,
85702+ GR_SALT_LEN = 16,
85703+ GR_SHA_LEN = 32,
85704+};
85705+
85706+enum {
85707+ GR_SPROLE_LEN = 64,
85708+};
85709+
85710+enum {
85711+ GR_NO_GLOB = 0,
85712+ GR_REG_GLOB,
85713+ GR_CREATE_GLOB
85714+};
85715+
85716+#define GR_NLIMITS 32
85717+
85718+/* Begin Data Structures */
85719+
85720+struct sprole_pw {
85721+ unsigned char *rolename;
85722+ unsigned char salt[GR_SALT_LEN];
85723+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
85724+};
85725+
85726+struct name_entry {
85727+ __u32 key;
85728+ ino_t inode;
85729+ dev_t device;
85730+ char *name;
85731+ __u16 len;
85732+ __u8 deleted;
85733+ struct name_entry *prev;
85734+ struct name_entry *next;
85735+};
85736+
85737+struct inodev_entry {
85738+ struct name_entry *nentry;
85739+ struct inodev_entry *prev;
85740+ struct inodev_entry *next;
85741+};
85742+
85743+struct acl_role_db {
85744+ struct acl_role_label **r_hash;
85745+ __u32 r_size;
85746+};
85747+
85748+struct inodev_db {
85749+ struct inodev_entry **i_hash;
85750+ __u32 i_size;
85751+};
85752+
85753+struct name_db {
85754+ struct name_entry **n_hash;
85755+ __u32 n_size;
85756+};
85757+
85758+struct crash_uid {
85759+ uid_t uid;
85760+ unsigned long expires;
85761+};
85762+
85763+struct gr_hash_struct {
85764+ void **table;
85765+ void **nametable;
85766+ void *first;
85767+ __u32 table_size;
85768+ __u32 used_size;
85769+ int type;
85770+};
85771+
85772+/* Userspace Grsecurity ACL data structures */
85773+
85774+struct acl_subject_label {
85775+ char *filename;
85776+ ino_t inode;
85777+ dev_t device;
85778+ __u32 mode;
85779+ kernel_cap_t cap_mask;
85780+ kernel_cap_t cap_lower;
85781+ kernel_cap_t cap_invert_audit;
85782+
85783+ struct rlimit res[GR_NLIMITS];
85784+ __u32 resmask;
85785+
85786+ __u8 user_trans_type;
85787+ __u8 group_trans_type;
85788+ uid_t *user_transitions;
85789+ gid_t *group_transitions;
85790+ __u16 user_trans_num;
85791+ __u16 group_trans_num;
85792+
85793+ __u32 sock_families[2];
85794+ __u32 ip_proto[8];
85795+ __u32 ip_type;
85796+ struct acl_ip_label **ips;
85797+ __u32 ip_num;
85798+ __u32 inaddr_any_override;
85799+
85800+ __u32 crashes;
85801+ unsigned long expires;
85802+
85803+ struct acl_subject_label *parent_subject;
85804+ struct gr_hash_struct *hash;
85805+ struct acl_subject_label *prev;
85806+ struct acl_subject_label *next;
85807+
85808+ struct acl_object_label **obj_hash;
85809+ __u32 obj_hash_size;
85810+ __u16 pax_flags;
85811+};
85812+
85813+struct role_allowed_ip {
85814+ __u32 addr;
85815+ __u32 netmask;
85816+
85817+ struct role_allowed_ip *prev;
85818+ struct role_allowed_ip *next;
85819+};
85820+
85821+struct role_transition {
85822+ char *rolename;
85823+
85824+ struct role_transition *prev;
85825+ struct role_transition *next;
85826+};
85827+
85828+struct acl_role_label {
85829+ char *rolename;
85830+ uid_t uidgid;
85831+ __u16 roletype;
85832+
85833+ __u16 auth_attempts;
85834+ unsigned long expires;
85835+
85836+ struct acl_subject_label *root_label;
85837+ struct gr_hash_struct *hash;
85838+
85839+ struct acl_role_label *prev;
85840+ struct acl_role_label *next;
85841+
85842+ struct role_transition *transitions;
85843+ struct role_allowed_ip *allowed_ips;
85844+ uid_t *domain_children;
85845+ __u16 domain_child_num;
85846+
85847+ mode_t umask;
85848+
85849+ struct acl_subject_label **subj_hash;
85850+ __u32 subj_hash_size;
85851+};
85852+
85853+struct user_acl_role_db {
85854+ struct acl_role_label **r_table;
85855+ __u32 num_pointers; /* Number of allocations to track */
85856+ __u32 num_roles; /* Number of roles */
85857+ __u32 num_domain_children; /* Number of domain children */
85858+ __u32 num_subjects; /* Number of subjects */
85859+ __u32 num_objects; /* Number of objects */
85860+};
85861+
85862+struct acl_object_label {
85863+ char *filename;
85864+ ino_t inode;
85865+ dev_t device;
85866+ __u32 mode;
85867+
85868+ struct acl_subject_label *nested;
85869+ struct acl_object_label *globbed;
85870+
85871+ /* next two structures not used */
85872+
85873+ struct acl_object_label *prev;
85874+ struct acl_object_label *next;
85875+};
85876+
85877+struct acl_ip_label {
85878+ char *iface;
85879+ __u32 addr;
85880+ __u32 netmask;
85881+ __u16 low, high;
85882+ __u8 mode;
85883+ __u32 type;
85884+ __u32 proto[8];
85885+
85886+ /* next two structures not used */
85887+
85888+ struct acl_ip_label *prev;
85889+ struct acl_ip_label *next;
85890+};
85891+
85892+struct gr_arg {
85893+ struct user_acl_role_db role_db;
85894+ unsigned char pw[GR_PW_LEN];
85895+ unsigned char salt[GR_SALT_LEN];
85896+ unsigned char sum[GR_SHA_LEN];
85897+ unsigned char sp_role[GR_SPROLE_LEN];
85898+ struct sprole_pw *sprole_pws;
85899+ dev_t segv_device;
85900+ ino_t segv_inode;
85901+ uid_t segv_uid;
85902+ __u16 num_sprole_pws;
85903+ __u16 mode;
85904+};
85905+
85906+struct gr_arg_wrapper {
85907+ struct gr_arg *arg;
85908+ __u32 version;
85909+ __u32 size;
85910+};
85911+
85912+struct subject_map {
85913+ struct acl_subject_label *user;
85914+ struct acl_subject_label *kernel;
85915+ struct subject_map *prev;
85916+ struct subject_map *next;
85917+};
85918+
85919+struct acl_subj_map_db {
85920+ struct subject_map **s_hash;
85921+ __u32 s_size;
85922+};
85923+
85924+/* End Data Structures Section */
85925+
85926+/* Hash functions generated by empirical testing by Brad Spengler
85927+ Makes good use of the low bits of the inode. Generally 0-1 times
85928+ in loop for successful match. 0-3 for unsuccessful match.
85929+ Shift/add algorithm with modulus of table size and an XOR*/
85930+
85931+static __inline__ unsigned int
85932+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
85933+{
85934+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
85935+}
85936+
85937+ static __inline__ unsigned int
85938+shash(const struct acl_subject_label *userp, const unsigned int sz)
85939+{
85940+ return ((const unsigned long)userp % sz);
85941+}
85942+
85943+static __inline__ unsigned int
85944+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
85945+{
85946+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
85947+}
85948+
85949+static __inline__ unsigned int
85950+nhash(const char *name, const __u16 len, const unsigned int sz)
85951+{
85952+ return full_name_hash((const unsigned char *)name, len) % sz;
85953+}
85954+
85955+#define FOR_EACH_ROLE_START(role) \
85956+ role = role_list; \
85957+ while (role) {
85958+
85959+#define FOR_EACH_ROLE_END(role) \
85960+ role = role->prev; \
85961+ }
85962+
85963+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
85964+ subj = NULL; \
85965+ iter = 0; \
85966+ while (iter < role->subj_hash_size) { \
85967+ if (subj == NULL) \
85968+ subj = role->subj_hash[iter]; \
85969+ if (subj == NULL) { \
85970+ iter++; \
85971+ continue; \
85972+ }
85973+
85974+#define FOR_EACH_SUBJECT_END(subj,iter) \
85975+ subj = subj->next; \
85976+ if (subj == NULL) \
85977+ iter++; \
85978+ }
85979+
85980+
85981+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
85982+ subj = role->hash->first; \
85983+ while (subj != NULL) {
85984+
85985+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
85986+ subj = subj->next; \
85987+ }
85988+
85989+#endif
85990+
85991diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
85992new file mode 100644
85993index 0000000..323ecf2
85994--- /dev/null
85995+++ b/include/linux/gralloc.h
85996@@ -0,0 +1,9 @@
85997+#ifndef __GRALLOC_H
85998+#define __GRALLOC_H
85999+
86000+void acl_free_all(void);
86001+int acl_alloc_stack_init(unsigned long size);
86002+void *acl_alloc(unsigned long len);
86003+void *acl_alloc_num(unsigned long num, unsigned long len);
86004+
86005+#endif
86006diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
86007new file mode 100644
86008index 0000000..70d6cd5
86009--- /dev/null
86010+++ b/include/linux/grdefs.h
86011@@ -0,0 +1,140 @@
86012+#ifndef GRDEFS_H
86013+#define GRDEFS_H
86014+
86015+/* Begin grsecurity status declarations */
86016+
86017+enum {
86018+ GR_READY = 0x01,
86019+ GR_STATUS_INIT = 0x00 // disabled state
86020+};
86021+
86022+/* Begin ACL declarations */
86023+
86024+/* Role flags */
86025+
86026+enum {
86027+ GR_ROLE_USER = 0x0001,
86028+ GR_ROLE_GROUP = 0x0002,
86029+ GR_ROLE_DEFAULT = 0x0004,
86030+ GR_ROLE_SPECIAL = 0x0008,
86031+ GR_ROLE_AUTH = 0x0010,
86032+ GR_ROLE_NOPW = 0x0020,
86033+ GR_ROLE_GOD = 0x0040,
86034+ GR_ROLE_LEARN = 0x0080,
86035+ GR_ROLE_TPE = 0x0100,
86036+ GR_ROLE_DOMAIN = 0x0200,
86037+ GR_ROLE_PAM = 0x0400,
86038+ GR_ROLE_PERSIST = 0x800
86039+};
86040+
86041+/* ACL Subject and Object mode flags */
86042+enum {
86043+ GR_DELETED = 0x80000000
86044+};
86045+
86046+/* ACL Object-only mode flags */
86047+enum {
86048+ GR_READ = 0x00000001,
86049+ GR_APPEND = 0x00000002,
86050+ GR_WRITE = 0x00000004,
86051+ GR_EXEC = 0x00000008,
86052+ GR_FIND = 0x00000010,
86053+ GR_INHERIT = 0x00000020,
86054+ GR_SETID = 0x00000040,
86055+ GR_CREATE = 0x00000080,
86056+ GR_DELETE = 0x00000100,
86057+ GR_LINK = 0x00000200,
86058+ GR_AUDIT_READ = 0x00000400,
86059+ GR_AUDIT_APPEND = 0x00000800,
86060+ GR_AUDIT_WRITE = 0x00001000,
86061+ GR_AUDIT_EXEC = 0x00002000,
86062+ GR_AUDIT_FIND = 0x00004000,
86063+ GR_AUDIT_INHERIT= 0x00008000,
86064+ GR_AUDIT_SETID = 0x00010000,
86065+ GR_AUDIT_CREATE = 0x00020000,
86066+ GR_AUDIT_DELETE = 0x00040000,
86067+ GR_AUDIT_LINK = 0x00080000,
86068+ GR_PTRACERD = 0x00100000,
86069+ GR_NOPTRACE = 0x00200000,
86070+ GR_SUPPRESS = 0x00400000,
86071+ GR_NOLEARN = 0x00800000,
86072+ GR_INIT_TRANSFER= 0x01000000
86073+};
86074+
86075+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
86076+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
86077+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
86078+
86079+/* ACL subject-only mode flags */
86080+enum {
86081+ GR_KILL = 0x00000001,
86082+ GR_VIEW = 0x00000002,
86083+ GR_PROTECTED = 0x00000004,
86084+ GR_LEARN = 0x00000008,
86085+ GR_OVERRIDE = 0x00000010,
86086+ /* just a placeholder, this mode is only used in userspace */
86087+ GR_DUMMY = 0x00000020,
86088+ GR_PROTSHM = 0x00000040,
86089+ GR_KILLPROC = 0x00000080,
86090+ GR_KILLIPPROC = 0x00000100,
86091+ /* just a placeholder, this mode is only used in userspace */
86092+ GR_NOTROJAN = 0x00000200,
86093+ GR_PROTPROCFD = 0x00000400,
86094+ GR_PROCACCT = 0x00000800,
86095+ GR_RELAXPTRACE = 0x00001000,
86096+ GR_NESTED = 0x00002000,
86097+ GR_INHERITLEARN = 0x00004000,
86098+ GR_PROCFIND = 0x00008000,
86099+ GR_POVERRIDE = 0x00010000,
86100+ GR_KERNELAUTH = 0x00020000,
86101+ GR_ATSECURE = 0x00040000,
86102+ GR_SHMEXEC = 0x00080000
86103+};
86104+
86105+enum {
86106+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
86107+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
86108+ GR_PAX_ENABLE_MPROTECT = 0x0004,
86109+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
86110+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
86111+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
86112+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
86113+ GR_PAX_DISABLE_MPROTECT = 0x0400,
86114+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
86115+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
86116+};
86117+
86118+enum {
86119+ GR_ID_USER = 0x01,
86120+ GR_ID_GROUP = 0x02,
86121+};
86122+
86123+enum {
86124+ GR_ID_ALLOW = 0x01,
86125+ GR_ID_DENY = 0x02,
86126+};
86127+
86128+#define GR_CRASH_RES 31
86129+#define GR_UIDTABLE_MAX 500
86130+
86131+/* begin resource learning section */
86132+enum {
86133+ GR_RLIM_CPU_BUMP = 60,
86134+ GR_RLIM_FSIZE_BUMP = 50000,
86135+ GR_RLIM_DATA_BUMP = 10000,
86136+ GR_RLIM_STACK_BUMP = 1000,
86137+ GR_RLIM_CORE_BUMP = 10000,
86138+ GR_RLIM_RSS_BUMP = 500000,
86139+ GR_RLIM_NPROC_BUMP = 1,
86140+ GR_RLIM_NOFILE_BUMP = 5,
86141+ GR_RLIM_MEMLOCK_BUMP = 50000,
86142+ GR_RLIM_AS_BUMP = 500000,
86143+ GR_RLIM_LOCKS_BUMP = 2,
86144+ GR_RLIM_SIGPENDING_BUMP = 5,
86145+ GR_RLIM_MSGQUEUE_BUMP = 10000,
86146+ GR_RLIM_NICE_BUMP = 1,
86147+ GR_RLIM_RTPRIO_BUMP = 1,
86148+ GR_RLIM_RTTIME_BUMP = 1000000
86149+};
86150+
86151+#endif
86152diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
86153new file mode 100644
86154index 0000000..3826b91
86155--- /dev/null
86156+++ b/include/linux/grinternal.h
86157@@ -0,0 +1,219 @@
86158+#ifndef __GRINTERNAL_H
86159+#define __GRINTERNAL_H
86160+
86161+#ifdef CONFIG_GRKERNSEC
86162+
86163+#include <linux/fs.h>
86164+#include <linux/mnt_namespace.h>
86165+#include <linux/nsproxy.h>
86166+#include <linux/gracl.h>
86167+#include <linux/grdefs.h>
86168+#include <linux/grmsg.h>
86169+
86170+void gr_add_learn_entry(const char *fmt, ...)
86171+ __attribute__ ((format (printf, 1, 2)));
86172+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
86173+ const struct vfsmount *mnt);
86174+__u32 gr_check_create(const struct dentry *new_dentry,
86175+ const struct dentry *parent,
86176+ const struct vfsmount *mnt, const __u32 mode);
86177+int gr_check_protected_task(const struct task_struct *task);
86178+__u32 to_gr_audit(const __u32 reqmode);
86179+int gr_set_acls(const int type);
86180+int gr_apply_subject_to_task(struct task_struct *task);
86181+int gr_acl_is_enabled(void);
86182+char gr_roletype_to_char(void);
86183+
86184+void gr_handle_alertkill(struct task_struct *task);
86185+char *gr_to_filename(const struct dentry *dentry,
86186+ const struct vfsmount *mnt);
86187+char *gr_to_filename1(const struct dentry *dentry,
86188+ const struct vfsmount *mnt);
86189+char *gr_to_filename2(const struct dentry *dentry,
86190+ const struct vfsmount *mnt);
86191+char *gr_to_filename3(const struct dentry *dentry,
86192+ const struct vfsmount *mnt);
86193+
86194+extern int grsec_enable_ptrace_readexec;
86195+extern int grsec_enable_harden_ptrace;
86196+extern int grsec_enable_link;
86197+extern int grsec_enable_fifo;
86198+extern int grsec_enable_shm;
86199+extern int grsec_enable_execlog;
86200+extern int grsec_enable_signal;
86201+extern int grsec_enable_audit_ptrace;
86202+extern int grsec_enable_forkfail;
86203+extern int grsec_enable_time;
86204+extern int grsec_enable_rofs;
86205+extern int grsec_enable_chroot_shmat;
86206+extern int grsec_enable_chroot_mount;
86207+extern int grsec_enable_chroot_double;
86208+extern int grsec_enable_chroot_pivot;
86209+extern int grsec_enable_chroot_chdir;
86210+extern int grsec_enable_chroot_chmod;
86211+extern int grsec_enable_chroot_mknod;
86212+extern int grsec_enable_chroot_fchdir;
86213+extern int grsec_enable_chroot_nice;
86214+extern int grsec_enable_chroot_execlog;
86215+extern int grsec_enable_chroot_caps;
86216+extern int grsec_enable_chroot_sysctl;
86217+extern int grsec_enable_chroot_unix;
86218+extern int grsec_enable_tpe;
86219+extern int grsec_tpe_gid;
86220+extern int grsec_enable_tpe_all;
86221+extern int grsec_enable_tpe_invert;
86222+extern int grsec_enable_socket_all;
86223+extern int grsec_socket_all_gid;
86224+extern int grsec_enable_socket_client;
86225+extern int grsec_socket_client_gid;
86226+extern int grsec_enable_socket_server;
86227+extern int grsec_socket_server_gid;
86228+extern int grsec_audit_gid;
86229+extern int grsec_enable_group;
86230+extern int grsec_enable_audit_textrel;
86231+extern int grsec_enable_log_rwxmaps;
86232+extern int grsec_enable_mount;
86233+extern int grsec_enable_chdir;
86234+extern int grsec_resource_logging;
86235+extern int grsec_enable_blackhole;
86236+extern int grsec_lastack_retries;
86237+extern int grsec_enable_brute;
86238+extern int grsec_lock;
86239+
86240+extern spinlock_t grsec_alert_lock;
86241+extern unsigned long grsec_alert_wtime;
86242+extern unsigned long grsec_alert_fyet;
86243+
86244+extern spinlock_t grsec_audit_lock;
86245+
86246+extern rwlock_t grsec_exec_file_lock;
86247+
86248+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
86249+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
86250+ (tsk)->exec_file->f_vfsmnt) : "/")
86251+
86252+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
86253+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
86254+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86255+
86256+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
86257+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
86258+ (tsk)->exec_file->f_vfsmnt) : "/")
86259+
86260+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
86261+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
86262+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86263+
86264+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
86265+
86266+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
86267+
86268+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
86269+ (task)->pid, (cred)->uid, \
86270+ (cred)->euid, (cred)->gid, (cred)->egid, \
86271+ gr_parent_task_fullpath(task), \
86272+ (task)->real_parent->comm, (task)->real_parent->pid, \
86273+ (pcred)->uid, (pcred)->euid, \
86274+ (pcred)->gid, (pcred)->egid
86275+
86276+#define GR_CHROOT_CAPS {{ \
86277+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
86278+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
86279+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
86280+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
86281+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
86282+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
86283+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
86284+
86285+#define security_learn(normal_msg,args...) \
86286+({ \
86287+ read_lock(&grsec_exec_file_lock); \
86288+ gr_add_learn_entry(normal_msg "\n", ## args); \
86289+ read_unlock(&grsec_exec_file_lock); \
86290+})
86291+
86292+enum {
86293+ GR_DO_AUDIT,
86294+ GR_DONT_AUDIT,
86295+ GR_DONT_AUDIT_GOOD
86296+};
86297+
86298+enum {
86299+ GR_TTYSNIFF,
86300+ GR_RBAC,
86301+ GR_RBAC_STR,
86302+ GR_STR_RBAC,
86303+ GR_RBAC_MODE2,
86304+ GR_RBAC_MODE3,
86305+ GR_FILENAME,
86306+ GR_SYSCTL_HIDDEN,
86307+ GR_NOARGS,
86308+ GR_ONE_INT,
86309+ GR_ONE_INT_TWO_STR,
86310+ GR_ONE_STR,
86311+ GR_STR_INT,
86312+ GR_TWO_STR_INT,
86313+ GR_TWO_INT,
86314+ GR_TWO_U64,
86315+ GR_THREE_INT,
86316+ GR_FIVE_INT_TWO_STR,
86317+ GR_TWO_STR,
86318+ GR_THREE_STR,
86319+ GR_FOUR_STR,
86320+ GR_STR_FILENAME,
86321+ GR_FILENAME_STR,
86322+ GR_FILENAME_TWO_INT,
86323+ GR_FILENAME_TWO_INT_STR,
86324+ GR_TEXTREL,
86325+ GR_PTRACE,
86326+ GR_RESOURCE,
86327+ GR_CAP,
86328+ GR_SIG,
86329+ GR_SIG2,
86330+ GR_CRASH1,
86331+ GR_CRASH2,
86332+ GR_PSACCT,
86333+ GR_RWXMAP
86334+};
86335+
86336+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
86337+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
86338+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
86339+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
86340+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
86341+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
86342+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
86343+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
86344+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
86345+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
86346+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
86347+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
86348+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
86349+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
86350+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
86351+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
86352+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
86353+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
86354+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
86355+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
86356+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
86357+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
86358+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
86359+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
86360+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
86361+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
86362+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
86363+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
86364+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
86365+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
86366+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
86367+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
86368+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
86369+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
86370+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
86371+
86372+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
86373+
86374+#endif
86375+
86376+#endif
86377diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
86378new file mode 100644
86379index 0000000..f885406
86380--- /dev/null
86381+++ b/include/linux/grmsg.h
86382@@ -0,0 +1,109 @@
86383+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
86384+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
86385+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
86386+#define GR_STOPMOD_MSG "denied modification of module state by "
86387+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
86388+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
86389+#define GR_IOPERM_MSG "denied use of ioperm() by "
86390+#define GR_IOPL_MSG "denied use of iopl() by "
86391+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
86392+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
86393+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
86394+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
86395+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
86396+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
86397+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
86398+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
86399+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
86400+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
86401+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
86402+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
86403+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
86404+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
86405+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
86406+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
86407+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
86408+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
86409+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
86410+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
86411+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
86412+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
86413+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
86414+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
86415+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
86416+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
86417+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
86418+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
86419+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
86420+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
86421+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
86422+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
86423+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
86424+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
86425+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
86426+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
86427+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
86428+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
86429+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
86430+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
86431+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
86432+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
86433+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
86434+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
86435+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
86436+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
86437+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
86438+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
86439+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
86440+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
86441+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
86442+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
86443+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
86444+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
86445+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
86446+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
86447+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
86448+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
86449+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
86450+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
86451+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
86452+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
86453+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
86454+#define GR_FAILFORK_MSG "failed fork with errno %s by "
86455+#define GR_NICE_CHROOT_MSG "denied priority change by "
86456+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
86457+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
86458+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
86459+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
86460+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
86461+#define GR_TIME_MSG "time set by "
86462+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
86463+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
86464+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
86465+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
86466+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
86467+#define GR_BIND_MSG "denied bind() by "
86468+#define GR_CONNECT_MSG "denied connect() by "
86469+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
86470+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
86471+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
86472+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
86473+#define GR_CAP_ACL_MSG "use of %s denied for "
86474+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
86475+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
86476+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
86477+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
86478+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
86479+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
86480+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
86481+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
86482+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
86483+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
86484+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
86485+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
86486+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
86487+#define GR_VM86_MSG "denied use of vm86 by "
86488+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
86489+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
86490+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
86491+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
86492diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
86493new file mode 100644
86494index 0000000..c1793ae
86495--- /dev/null
86496+++ b/include/linux/grsecurity.h
86497@@ -0,0 +1,219 @@
86498+#ifndef GR_SECURITY_H
86499+#define GR_SECURITY_H
86500+#include <linux/fs.h>
86501+#include <linux/fs_struct.h>
86502+#include <linux/binfmts.h>
86503+#include <linux/gracl.h>
86504+#include <linux/compat.h>
86505+
86506+/* notify of brain-dead configs */
86507+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86508+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
86509+#endif
86510+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
86511+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
86512+#endif
86513+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
86514+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
86515+#endif
86516+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
86517+#error "CONFIG_PAX enabled, but no PaX options are enabled."
86518+#endif
86519+
86520+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
86521+void gr_handle_brute_check(void);
86522+void gr_handle_kernel_exploit(void);
86523+int gr_process_user_ban(void);
86524+
86525+char gr_roletype_to_char(void);
86526+
86527+int gr_acl_enable_at_secure(void);
86528+
86529+int gr_check_user_change(int real, int effective, int fs);
86530+int gr_check_group_change(int real, int effective, int fs);
86531+
86532+void gr_del_task_from_ip_table(struct task_struct *p);
86533+
86534+int gr_pid_is_chrooted(struct task_struct *p);
86535+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
86536+int gr_handle_chroot_nice(void);
86537+int gr_handle_chroot_sysctl(const int op);
86538+int gr_handle_chroot_setpriority(struct task_struct *p,
86539+ const int niceval);
86540+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
86541+int gr_handle_chroot_chroot(const struct dentry *dentry,
86542+ const struct vfsmount *mnt);
86543+void gr_handle_chroot_chdir(struct path *path);
86544+int gr_handle_chroot_chmod(const struct dentry *dentry,
86545+ const struct vfsmount *mnt, const int mode);
86546+int gr_handle_chroot_mknod(const struct dentry *dentry,
86547+ const struct vfsmount *mnt, const int mode);
86548+int gr_handle_chroot_mount(const struct dentry *dentry,
86549+ const struct vfsmount *mnt,
86550+ const char *dev_name);
86551+int gr_handle_chroot_pivot(void);
86552+int gr_handle_chroot_unix(const pid_t pid);
86553+
86554+int gr_handle_rawio(const struct inode *inode);
86555+
86556+void gr_handle_ioperm(void);
86557+void gr_handle_iopl(void);
86558+
86559+umode_t gr_acl_umask(void);
86560+
86561+int gr_tpe_allow(const struct file *file);
86562+
86563+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
86564+void gr_clear_chroot_entries(struct task_struct *task);
86565+
86566+void gr_log_forkfail(const int retval);
86567+void gr_log_timechange(void);
86568+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
86569+void gr_log_chdir(const struct dentry *dentry,
86570+ const struct vfsmount *mnt);
86571+void gr_log_chroot_exec(const struct dentry *dentry,
86572+ const struct vfsmount *mnt);
86573+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
86574+#ifdef CONFIG_COMPAT
86575+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
86576+#endif
86577+void gr_log_remount(const char *devname, const int retval);
86578+void gr_log_unmount(const char *devname, const int retval);
86579+void gr_log_mount(const char *from, const char *to, const int retval);
86580+void gr_log_textrel(struct vm_area_struct *vma);
86581+void gr_log_rwxmmap(struct file *file);
86582+void gr_log_rwxmprotect(struct file *file);
86583+
86584+int gr_handle_follow_link(const struct inode *parent,
86585+ const struct inode *inode,
86586+ const struct dentry *dentry,
86587+ const struct vfsmount *mnt);
86588+int gr_handle_fifo(const struct dentry *dentry,
86589+ const struct vfsmount *mnt,
86590+ const struct dentry *dir, const int flag,
86591+ const int acc_mode);
86592+int gr_handle_hardlink(const struct dentry *dentry,
86593+ const struct vfsmount *mnt,
86594+ struct inode *inode,
86595+ const int mode, const char *to);
86596+
86597+int gr_is_capable(const int cap);
86598+int gr_is_capable_nolog(const int cap);
86599+void gr_learn_resource(const struct task_struct *task, const int limit,
86600+ const unsigned long wanted, const int gt);
86601+void gr_copy_label(struct task_struct *tsk);
86602+void gr_handle_crash(struct task_struct *task, const int sig);
86603+int gr_handle_signal(const struct task_struct *p, const int sig);
86604+int gr_check_crash_uid(const uid_t uid);
86605+int gr_check_protected_task(const struct task_struct *task);
86606+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
86607+int gr_acl_handle_mmap(const struct file *file,
86608+ const unsigned long prot);
86609+int gr_acl_handle_mprotect(const struct file *file,
86610+ const unsigned long prot);
86611+int gr_check_hidden_task(const struct task_struct *tsk);
86612+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
86613+ const struct vfsmount *mnt);
86614+__u32 gr_acl_handle_utime(const struct dentry *dentry,
86615+ const struct vfsmount *mnt);
86616+__u32 gr_acl_handle_access(const struct dentry *dentry,
86617+ const struct vfsmount *mnt, const int fmode);
86618+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
86619+ const struct vfsmount *mnt, umode_t *mode);
86620+__u32 gr_acl_handle_chown(const struct dentry *dentry,
86621+ const struct vfsmount *mnt);
86622+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
86623+ const struct vfsmount *mnt);
86624+int gr_handle_ptrace(struct task_struct *task, const long request);
86625+int gr_handle_proc_ptrace(struct task_struct *task);
86626+__u32 gr_acl_handle_execve(const struct dentry *dentry,
86627+ const struct vfsmount *mnt);
86628+int gr_check_crash_exec(const struct file *filp);
86629+int gr_acl_is_enabled(void);
86630+void gr_set_kernel_label(struct task_struct *task);
86631+void gr_set_role_label(struct task_struct *task, const uid_t uid,
86632+ const gid_t gid);
86633+int gr_set_proc_label(const struct dentry *dentry,
86634+ const struct vfsmount *mnt,
86635+ const int unsafe_flags);
86636+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
86637+ const struct vfsmount *mnt);
86638+__u32 gr_acl_handle_open(const struct dentry *dentry,
86639+ const struct vfsmount *mnt, int acc_mode);
86640+__u32 gr_acl_handle_creat(const struct dentry *dentry,
86641+ const struct dentry *p_dentry,
86642+ const struct vfsmount *p_mnt,
86643+ int open_flags, int acc_mode, const int imode);
86644+void gr_handle_create(const struct dentry *dentry,
86645+ const struct vfsmount *mnt);
86646+void gr_handle_proc_create(const struct dentry *dentry,
86647+ const struct inode *inode);
86648+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
86649+ const struct dentry *parent_dentry,
86650+ const struct vfsmount *parent_mnt,
86651+ const int mode);
86652+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
86653+ const struct dentry *parent_dentry,
86654+ const struct vfsmount *parent_mnt);
86655+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
86656+ const struct vfsmount *mnt);
86657+void gr_handle_delete(const ino_t ino, const dev_t dev);
86658+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
86659+ const struct vfsmount *mnt);
86660+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
86661+ const struct dentry *parent_dentry,
86662+ const struct vfsmount *parent_mnt,
86663+ const char *from);
86664+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
86665+ const struct dentry *parent_dentry,
86666+ const struct vfsmount *parent_mnt,
86667+ const struct dentry *old_dentry,
86668+ const struct vfsmount *old_mnt, const char *to);
86669+int gr_acl_handle_rename(struct dentry *new_dentry,
86670+ struct dentry *parent_dentry,
86671+ const struct vfsmount *parent_mnt,
86672+ struct dentry *old_dentry,
86673+ struct inode *old_parent_inode,
86674+ struct vfsmount *old_mnt, const char *newname);
86675+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
86676+ struct dentry *old_dentry,
86677+ struct dentry *new_dentry,
86678+ struct vfsmount *mnt, const __u8 replace);
86679+__u32 gr_check_link(const struct dentry *new_dentry,
86680+ const struct dentry *parent_dentry,
86681+ const struct vfsmount *parent_mnt,
86682+ const struct dentry *old_dentry,
86683+ const struct vfsmount *old_mnt);
86684+int gr_acl_handle_filldir(const struct file *file, const char *name,
86685+ const unsigned int namelen, const ino_t ino);
86686+
86687+__u32 gr_acl_handle_unix(const struct dentry *dentry,
86688+ const struct vfsmount *mnt);
86689+void gr_acl_handle_exit(void);
86690+void gr_acl_handle_psacct(struct task_struct *task, const long code);
86691+int gr_acl_handle_procpidmem(const struct task_struct *task);
86692+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
86693+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
86694+void gr_audit_ptrace(struct task_struct *task);
86695+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
86696+
86697+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
86698+
86699+#ifdef CONFIG_GRKERNSEC
86700+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
86701+void gr_handle_vm86(void);
86702+void gr_handle_mem_readwrite(u64 from, u64 to);
86703+
86704+void gr_log_badprocpid(const char *entry);
86705+
86706+extern int grsec_enable_dmesg;
86707+extern int grsec_disable_privio;
86708+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
86709+extern int grsec_enable_chroot_findtask;
86710+#endif
86711+#ifdef CONFIG_GRKERNSEC_SETXID
86712+extern int grsec_enable_setxid;
86713+#endif
86714+#endif
86715+
86716+#endif
86717diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
86718index 6a87154..a3ce57b 100644
86719--- a/include/linux/hdpu_features.h
86720+++ b/include/linux/hdpu_features.h
86721@@ -3,7 +3,7 @@
86722 struct cpustate_t {
86723 spinlock_t lock;
86724 int excl;
86725- int open_count;
86726+ atomic_t open_count;
86727 unsigned char cached_val;
86728 int inited;
86729 unsigned long *set_addr;
86730diff --git a/include/linux/highmem.h b/include/linux/highmem.h
86731index 211ff44..00ab6d7 100644
86732--- a/include/linux/highmem.h
86733+++ b/include/linux/highmem.h
86734@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
86735 kunmap_atomic(kaddr, KM_USER0);
86736 }
86737
86738+static inline void sanitize_highpage(struct page *page)
86739+{
86740+ void *kaddr;
86741+ unsigned long flags;
86742+
86743+ local_irq_save(flags);
86744+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
86745+ clear_page(kaddr);
86746+ kunmap_atomic(kaddr, KM_CLEARPAGE);
86747+ local_irq_restore(flags);
86748+}
86749+
86750 static inline void zero_user_segments(struct page *page,
86751 unsigned start1, unsigned end1,
86752 unsigned start2, unsigned end2)
86753diff --git a/include/linux/i2c.h b/include/linux/i2c.h
86754index 7b40cda..24eb44e 100644
86755--- a/include/linux/i2c.h
86756+++ b/include/linux/i2c.h
86757@@ -325,6 +325,7 @@ struct i2c_algorithm {
86758 /* To determine what the adapter supports */
86759 u32 (*functionality) (struct i2c_adapter *);
86760 };
86761+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
86762
86763 /*
86764 * i2c_adapter is the structure used to identify a physical i2c bus along
86765diff --git a/include/linux/i2o.h b/include/linux/i2o.h
86766index 4c4e57d..f3c5303 100644
86767--- a/include/linux/i2o.h
86768+++ b/include/linux/i2o.h
86769@@ -564,7 +564,7 @@ struct i2o_controller {
86770 struct i2o_device *exec; /* Executive */
86771 #if BITS_PER_LONG == 64
86772 spinlock_t context_list_lock; /* lock for context_list */
86773- atomic_t context_list_counter; /* needed for unique contexts */
86774+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
86775 struct list_head context_list; /* list of context id's
86776 and pointers */
86777 #endif
86778diff --git a/include/linux/init_task.h b/include/linux/init_task.h
86779index 21a6f5d..dc42eab 100644
86780--- a/include/linux/init_task.h
86781+++ b/include/linux/init_task.h
86782@@ -83,6 +83,12 @@ extern struct group_info init_groups;
86783 #define INIT_IDS
86784 #endif
86785
86786+#ifdef CONFIG_X86
86787+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
86788+#else
86789+#define INIT_TASK_THREAD_INFO
86790+#endif
86791+
86792 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
86793 /*
86794 * Because of the reduced scope of CAP_SETPCAP when filesystem
86795@@ -156,6 +162,7 @@ extern struct cred init_cred;
86796 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
86797 .comm = "swapper", \
86798 .thread = INIT_THREAD, \
86799+ INIT_TASK_THREAD_INFO \
86800 .fs = &init_fs, \
86801 .files = &init_files, \
86802 .signal = &init_signals, \
86803diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
86804index 4f0a72a..a849599 100644
86805--- a/include/linux/intel-iommu.h
86806+++ b/include/linux/intel-iommu.h
86807@@ -296,7 +296,7 @@ struct iommu_flush {
86808 u8 fm, u64 type);
86809 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
86810 unsigned int size_order, u64 type);
86811-};
86812+} __no_const;
86813
86814 enum {
86815 SR_DMAR_FECTL_REG,
86816diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
86817index c739150..be577b5 100644
86818--- a/include/linux/interrupt.h
86819+++ b/include/linux/interrupt.h
86820@@ -369,7 +369,7 @@ enum
86821 /* map softirq index to softirq name. update 'softirq_to_name' in
86822 * kernel/softirq.c when adding a new softirq.
86823 */
86824-extern char *softirq_to_name[NR_SOFTIRQS];
86825+extern const char * const softirq_to_name[NR_SOFTIRQS];
86826
86827 /* softirq mask and active fields moved to irq_cpustat_t in
86828 * asm/hardirq.h to get better cache usage. KAO
86829@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
86830
86831 struct softirq_action
86832 {
86833- void (*action)(struct softirq_action *);
86834+ void (*action)(void);
86835 };
86836
86837 asmlinkage void do_softirq(void);
86838 asmlinkage void __do_softirq(void);
86839-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
86840+extern void open_softirq(int nr, void (*action)(void));
86841 extern void softirq_init(void);
86842 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
86843 extern void raise_softirq_irqoff(unsigned int nr);
86844diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
86845index eb73632..19abfc1 100644
86846--- a/include/linux/iocontext.h
86847+++ b/include/linux/iocontext.h
86848@@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
86849 return NULL;
86850 }
86851
86852+struct task_struct;
86853 #ifdef CONFIG_BLOCK
86854 int put_io_context(struct io_context *ioc);
86855-void exit_io_context(void);
86856+void exit_io_context(struct task_struct *task);
86857 struct io_context *get_io_context(gfp_t gfp_flags, int node);
86858 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
86859 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
86860 #else
86861-static inline void exit_io_context(void)
86862+static inline void exit_io_context(struct task_struct *task)
86863 {
86864 }
86865
86866diff --git a/include/linux/irq.h b/include/linux/irq.h
86867index 9e5f45a..025865b 100644
86868--- a/include/linux/irq.h
86869+++ b/include/linux/irq.h
86870@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
86871 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
86872 bool boot)
86873 {
86874+#ifdef CONFIG_CPUMASK_OFFSTACK
86875 gfp_t gfp = GFP_ATOMIC;
86876
86877 if (boot)
86878 gfp = GFP_NOWAIT;
86879
86880-#ifdef CONFIG_CPUMASK_OFFSTACK
86881 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
86882 return false;
86883
86884diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
86885index 7922742..27306a2 100644
86886--- a/include/linux/kallsyms.h
86887+++ b/include/linux/kallsyms.h
86888@@ -15,7 +15,8 @@
86889
86890 struct module;
86891
86892-#ifdef CONFIG_KALLSYMS
86893+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
86894+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86895 /* Lookup the address for a symbol. Returns 0 if not found. */
86896 unsigned long kallsyms_lookup_name(const char *name);
86897
86898@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
86899 /* Stupid that this does nothing, but I didn't create this mess. */
86900 #define __print_symbol(fmt, addr)
86901 #endif /*CONFIG_KALLSYMS*/
86902+#else /* when included by kallsyms.c, vsnprintf.c, or
86903+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
86904+extern void __print_symbol(const char *fmt, unsigned long address);
86905+extern int sprint_symbol(char *buffer, unsigned long address);
86906+const char *kallsyms_lookup(unsigned long addr,
86907+ unsigned long *symbolsize,
86908+ unsigned long *offset,
86909+ char **modname, char *namebuf);
86910+#endif
86911
86912 /* This macro allows us to keep printk typechecking */
86913 static void __check_printsym_format(const char *fmt, ...)
86914diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
86915index 6adcc29..13369e8 100644
86916--- a/include/linux/kgdb.h
86917+++ b/include/linux/kgdb.h
86918@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
86919
86920 extern int kgdb_connected;
86921
86922-extern atomic_t kgdb_setting_breakpoint;
86923-extern atomic_t kgdb_cpu_doing_single_step;
86924+extern atomic_unchecked_t kgdb_setting_breakpoint;
86925+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
86926
86927 extern struct task_struct *kgdb_usethread;
86928 extern struct task_struct *kgdb_contthread;
86929@@ -235,7 +235,7 @@ struct kgdb_arch {
86930 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
86931 void (*remove_all_hw_break)(void);
86932 void (*correct_hw_break)(void);
86933-};
86934+} __do_const;
86935
86936 /**
86937 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
86938@@ -257,14 +257,14 @@ struct kgdb_io {
86939 int (*init) (void);
86940 void (*pre_exception) (void);
86941 void (*post_exception) (void);
86942-};
86943+} __do_const;
86944
86945-extern struct kgdb_arch arch_kgdb_ops;
86946+extern const struct kgdb_arch arch_kgdb_ops;
86947
86948 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
86949
86950-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
86951-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
86952+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
86953+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
86954
86955 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
86956 extern int kgdb_mem2hex(char *mem, char *buf, int count);
86957diff --git a/include/linux/kmod.h b/include/linux/kmod.h
86958index 0546fe7..2a22bc1 100644
86959--- a/include/linux/kmod.h
86960+++ b/include/linux/kmod.h
86961@@ -31,6 +31,8 @@
86962 * usually useless though. */
86963 extern int __request_module(bool wait, const char *name, ...) \
86964 __attribute__((format(printf, 2, 3)));
86965+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
86966+ __attribute__((format(printf, 3, 4)));
86967 #define request_module(mod...) __request_module(true, mod)
86968 #define request_module_nowait(mod...) __request_module(false, mod)
86969 #define try_then_request_module(x, mod...) \
86970diff --git a/include/linux/kobject.h b/include/linux/kobject.h
86971index 58ae8e0..3950d3c 100644
86972--- a/include/linux/kobject.h
86973+++ b/include/linux/kobject.h
86974@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
86975
86976 struct kobj_type {
86977 void (*release)(struct kobject *kobj);
86978- struct sysfs_ops *sysfs_ops;
86979+ const struct sysfs_ops *sysfs_ops;
86980 struct attribute **default_attrs;
86981 };
86982
86983@@ -118,9 +118,9 @@ struct kobj_uevent_env {
86984 };
86985
86986 struct kset_uevent_ops {
86987- int (*filter)(struct kset *kset, struct kobject *kobj);
86988- const char *(*name)(struct kset *kset, struct kobject *kobj);
86989- int (*uevent)(struct kset *kset, struct kobject *kobj,
86990+ int (* const filter)(struct kset *kset, struct kobject *kobj);
86991+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
86992+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
86993 struct kobj_uevent_env *env);
86994 };
86995
86996@@ -132,7 +132,7 @@ struct kobj_attribute {
86997 const char *buf, size_t count);
86998 };
86999
87000-extern struct sysfs_ops kobj_sysfs_ops;
87001+extern const struct sysfs_ops kobj_sysfs_ops;
87002
87003 /**
87004 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
87005@@ -155,14 +155,14 @@ struct kset {
87006 struct list_head list;
87007 spinlock_t list_lock;
87008 struct kobject kobj;
87009- struct kset_uevent_ops *uevent_ops;
87010+ const struct kset_uevent_ops *uevent_ops;
87011 };
87012
87013 extern void kset_init(struct kset *kset);
87014 extern int __must_check kset_register(struct kset *kset);
87015 extern void kset_unregister(struct kset *kset);
87016 extern struct kset * __must_check kset_create_and_add(const char *name,
87017- struct kset_uevent_ops *u,
87018+ const struct kset_uevent_ops *u,
87019 struct kobject *parent_kobj);
87020
87021 static inline struct kset *to_kset(struct kobject *kobj)
87022diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
87023index c728a50..762821f 100644
87024--- a/include/linux/kvm_host.h
87025+++ b/include/linux/kvm_host.h
87026@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
87027 void vcpu_load(struct kvm_vcpu *vcpu);
87028 void vcpu_put(struct kvm_vcpu *vcpu);
87029
87030-int kvm_init(void *opaque, unsigned int vcpu_size,
87031+int kvm_init(const void *opaque, unsigned int vcpu_size,
87032 struct module *module);
87033 void kvm_exit(void);
87034
87035@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
87036 struct kvm_guest_debug *dbg);
87037 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
87038
87039-int kvm_arch_init(void *opaque);
87040+int kvm_arch_init(const void *opaque);
87041 void kvm_arch_exit(void);
87042
87043 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
87044@@ -519,7 +519,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
87045 int kvm_set_irq_routing(struct kvm *kvm,
87046 const struct kvm_irq_routing_entry *entries,
87047 unsigned nr,
87048- unsigned flags);
87049+ unsigned flags) __size_overflow(3);
87050 void kvm_free_irq_routing(struct kvm *kvm);
87051
87052 #else
87053diff --git a/include/linux/libata.h b/include/linux/libata.h
87054index a069916..223edde 100644
87055--- a/include/linux/libata.h
87056+++ b/include/linux/libata.h
87057@@ -525,11 +525,11 @@ struct ata_ioports {
87058
87059 struct ata_host {
87060 spinlock_t lock;
87061- struct device *dev;
87062+ struct device *dev;
87063 void __iomem * const *iomap;
87064 unsigned int n_ports;
87065 void *private_data;
87066- struct ata_port_operations *ops;
87067+ const struct ata_port_operations *ops;
87068 unsigned long flags;
87069 #ifdef CONFIG_ATA_ACPI
87070 acpi_handle acpi_handle;
87071@@ -710,7 +710,7 @@ struct ata_link {
87072
87073 struct ata_port {
87074 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
87075- struct ata_port_operations *ops;
87076+ const struct ata_port_operations *ops;
87077 spinlock_t *lock;
87078 /* Flags owned by the EH context. Only EH should touch these once the
87079 port is active */
87080@@ -884,7 +884,7 @@ struct ata_port_operations {
87081 * fields must be pointers.
87082 */
87083 const struct ata_port_operations *inherits;
87084-};
87085+} __do_const;
87086
87087 struct ata_port_info {
87088 unsigned long flags;
87089@@ -892,7 +892,7 @@ struct ata_port_info {
87090 unsigned long pio_mask;
87091 unsigned long mwdma_mask;
87092 unsigned long udma_mask;
87093- struct ata_port_operations *port_ops;
87094+ const struct ata_port_operations *port_ops;
87095 void *private_data;
87096 };
87097
87098@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
87099 extern const unsigned long sata_deb_timing_hotplug[];
87100 extern const unsigned long sata_deb_timing_long[];
87101
87102-extern struct ata_port_operations ata_dummy_port_ops;
87103+extern const struct ata_port_operations ata_dummy_port_ops;
87104 extern const struct ata_port_info ata_dummy_port_info;
87105
87106 static inline const unsigned long *
87107@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
87108 struct scsi_host_template *sht);
87109 extern void ata_host_detach(struct ata_host *host);
87110 extern void ata_host_init(struct ata_host *, struct device *,
87111- unsigned long, struct ata_port_operations *);
87112+ unsigned long, const struct ata_port_operations *);
87113 extern int ata_scsi_detect(struct scsi_host_template *sht);
87114 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
87115 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
87116diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
87117index fbc48f8..0886e57 100644
87118--- a/include/linux/lockd/bind.h
87119+++ b/include/linux/lockd/bind.h
87120@@ -23,13 +23,13 @@ struct svc_rqst;
87121 * This is the set of functions for lockd->nfsd communication
87122 */
87123 struct nlmsvc_binding {
87124- __be32 (*fopen)(struct svc_rqst *,
87125+ __be32 (* const fopen)(struct svc_rqst *,
87126 struct nfs_fh *,
87127 struct file **);
87128- void (*fclose)(struct file *);
87129+ void (* const fclose)(struct file *);
87130 };
87131
87132-extern struct nlmsvc_binding * nlmsvc_ops;
87133+extern const struct nlmsvc_binding * nlmsvc_ops;
87134
87135 /*
87136 * Similar to nfs_client_initdata, but without the NFS-specific
87137diff --git a/include/linux/mca.h b/include/linux/mca.h
87138index 3797270..7765ede 100644
87139--- a/include/linux/mca.h
87140+++ b/include/linux/mca.h
87141@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
87142 int region);
87143 void * (*mca_transform_memory)(struct mca_device *,
87144 void *memory);
87145-};
87146+} __no_const;
87147
87148 struct mca_bus {
87149 u64 default_dma_mask;
87150diff --git a/include/linux/memory.h b/include/linux/memory.h
87151index 37fa19b..b597c85 100644
87152--- a/include/linux/memory.h
87153+++ b/include/linux/memory.h
87154@@ -108,7 +108,7 @@ struct memory_accessor {
87155 size_t count);
87156 ssize_t (*write)(struct memory_accessor *, const char *buf,
87157 off_t offset, size_t count);
87158-};
87159+} __no_const;
87160
87161 /*
87162 * Kernel text modification mutex, used for code patching. Users of this lock
87163diff --git a/include/linux/mm.h b/include/linux/mm.h
87164index 11e5be6..1ff2423 100644
87165--- a/include/linux/mm.h
87166+++ b/include/linux/mm.h
87167@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
87168
87169 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
87170 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
87171+
87172+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87173+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
87174+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
87175+#else
87176 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
87177+#endif
87178+
87179 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
87180 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
87181
87182@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
87183 int set_page_dirty_lock(struct page *page);
87184 int clear_page_dirty_for_io(struct page *page);
87185
87186-/* Is the vma a continuation of the stack vma above it? */
87187-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
87188-{
87189- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
87190-}
87191-
87192 extern unsigned long move_page_tables(struct vm_area_struct *vma,
87193 unsigned long old_addr, struct vm_area_struct *new_vma,
87194 unsigned long new_addr, unsigned long len);
87195@@ -890,6 +891,8 @@ struct shrinker {
87196 extern void register_shrinker(struct shrinker *);
87197 extern void unregister_shrinker(struct shrinker *);
87198
87199+pgprot_t vm_get_page_prot(unsigned long vm_flags);
87200+
87201 int vma_wants_writenotify(struct vm_area_struct *vma);
87202
87203 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
87204@@ -1162,6 +1165,7 @@ out:
87205 }
87206
87207 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
87208+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
87209
87210 extern unsigned long do_brk(unsigned long, unsigned long);
87211
87212@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
87213 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
87214 struct vm_area_struct **pprev);
87215
87216+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
87217+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
87218+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
87219+
87220 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
87221 NULL if none. Assume start_addr < end_addr. */
87222 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
87223@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
87224 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
87225 }
87226
87227-pgprot_t vm_get_page_prot(unsigned long vm_flags);
87228 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
87229 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
87230 unsigned long pfn, unsigned long size, pgprot_t);
87231@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
87232 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
87233 extern int sysctl_memory_failure_early_kill;
87234 extern int sysctl_memory_failure_recovery;
87235-extern atomic_long_t mce_bad_pages;
87236+extern atomic_long_unchecked_t mce_bad_pages;
87237+
87238+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87239+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
87240+#else
87241+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
87242+#endif
87243
87244 #endif /* __KERNEL__ */
87245 #endif /* _LINUX_MM_H */
87246diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
87247index 9d12ed5..6d9707a 100644
87248--- a/include/linux/mm_types.h
87249+++ b/include/linux/mm_types.h
87250@@ -186,6 +186,8 @@ struct vm_area_struct {
87251 #ifdef CONFIG_NUMA
87252 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
87253 #endif
87254+
87255+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
87256 };
87257
87258 struct core_thread {
87259@@ -287,6 +289,24 @@ struct mm_struct {
87260 #ifdef CONFIG_MMU_NOTIFIER
87261 struct mmu_notifier_mm *mmu_notifier_mm;
87262 #endif
87263+
87264+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87265+ unsigned long pax_flags;
87266+#endif
87267+
87268+#ifdef CONFIG_PAX_DLRESOLVE
87269+ unsigned long call_dl_resolve;
87270+#endif
87271+
87272+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
87273+ unsigned long call_syscall;
87274+#endif
87275+
87276+#ifdef CONFIG_PAX_ASLR
87277+ unsigned long delta_mmap; /* randomized offset */
87278+ unsigned long delta_stack; /* randomized offset */
87279+#endif
87280+
87281 };
87282
87283 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
87284diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
87285index 4e02ee2..afb159e 100644
87286--- a/include/linux/mmu_notifier.h
87287+++ b/include/linux/mmu_notifier.h
87288@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
87289 */
87290 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
87291 ({ \
87292- pte_t __pte; \
87293+ pte_t ___pte; \
87294 struct vm_area_struct *___vma = __vma; \
87295 unsigned long ___address = __address; \
87296- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
87297+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
87298 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
87299- __pte; \
87300+ ___pte; \
87301 })
87302
87303 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
87304diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
87305index 6c31a2a..4b0e930 100644
87306--- a/include/linux/mmzone.h
87307+++ b/include/linux/mmzone.h
87308@@ -350,7 +350,7 @@ struct zone {
87309 unsigned long flags; /* zone flags, see below */
87310
87311 /* Zone statistics */
87312- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87313+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87314
87315 /*
87316 * prev_priority holds the scanning priority for this zone. It is
87317diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
87318index f58e9d8..3503935 100644
87319--- a/include/linux/mod_devicetable.h
87320+++ b/include/linux/mod_devicetable.h
87321@@ -12,7 +12,7 @@
87322 typedef unsigned long kernel_ulong_t;
87323 #endif
87324
87325-#define PCI_ANY_ID (~0)
87326+#define PCI_ANY_ID ((__u16)~0)
87327
87328 struct pci_device_id {
87329 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
87330@@ -131,7 +131,7 @@ struct usb_device_id {
87331 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
87332 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
87333
87334-#define HID_ANY_ID (~0)
87335+#define HID_ANY_ID (~0U)
87336
87337 struct hid_device_id {
87338 __u16 bus;
87339diff --git a/include/linux/module.h b/include/linux/module.h
87340index 482efc8..642032b 100644
87341--- a/include/linux/module.h
87342+++ b/include/linux/module.h
87343@@ -16,6 +16,7 @@
87344 #include <linux/kobject.h>
87345 #include <linux/moduleparam.h>
87346 #include <linux/tracepoint.h>
87347+#include <linux/fs.h>
87348
87349 #include <asm/local.h>
87350 #include <asm/module.h>
87351@@ -287,16 +288,16 @@ struct module
87352 int (*init)(void);
87353
87354 /* If this is non-NULL, vfree after init() returns */
87355- void *module_init;
87356+ void *module_init_rx, *module_init_rw;
87357
87358 /* Here is the actual code + data, vfree'd on unload. */
87359- void *module_core;
87360+ void *module_core_rx, *module_core_rw;
87361
87362 /* Here are the sizes of the init and core sections */
87363- unsigned int init_size, core_size;
87364+ unsigned int init_size_rw, core_size_rw;
87365
87366 /* The size of the executable code in each section. */
87367- unsigned int init_text_size, core_text_size;
87368+ unsigned int init_size_rx, core_size_rx;
87369
87370 /* Arch-specific module values */
87371 struct mod_arch_specific arch;
87372@@ -345,6 +346,10 @@ struct module
87373 #ifdef CONFIG_EVENT_TRACING
87374 struct ftrace_event_call *trace_events;
87375 unsigned int num_trace_events;
87376+ struct file_operations trace_id;
87377+ struct file_operations trace_enable;
87378+ struct file_operations trace_format;
87379+ struct file_operations trace_filter;
87380 #endif
87381 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
87382 unsigned long *ftrace_callsites;
87383@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
87384 bool is_module_address(unsigned long addr);
87385 bool is_module_text_address(unsigned long addr);
87386
87387+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
87388+{
87389+
87390+#ifdef CONFIG_PAX_KERNEXEC
87391+ if (ktla_ktva(addr) >= (unsigned long)start &&
87392+ ktla_ktva(addr) < (unsigned long)start + size)
87393+ return 1;
87394+#endif
87395+
87396+ return ((void *)addr >= start && (void *)addr < start + size);
87397+}
87398+
87399+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
87400+{
87401+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
87402+}
87403+
87404+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
87405+{
87406+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
87407+}
87408+
87409+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
87410+{
87411+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
87412+}
87413+
87414+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
87415+{
87416+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
87417+}
87418+
87419 static inline int within_module_core(unsigned long addr, struct module *mod)
87420 {
87421- return (unsigned long)mod->module_core <= addr &&
87422- addr < (unsigned long)mod->module_core + mod->core_size;
87423+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
87424 }
87425
87426 static inline int within_module_init(unsigned long addr, struct module *mod)
87427 {
87428- return (unsigned long)mod->module_init <= addr &&
87429- addr < (unsigned long)mod->module_init + mod->init_size;
87430+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
87431 }
87432
87433 /* Search for module by name: must hold module_mutex. */
87434diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
87435index c1f40c2..e875ff4 100644
87436--- a/include/linux/moduleloader.h
87437+++ b/include/linux/moduleloader.h
87438@@ -18,11 +18,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
87439
87440 /* Allocator used for allocating struct module, core sections and init
87441 sections. Returns NULL on failure. */
87442-void *module_alloc(unsigned long size);
87443+void *module_alloc(unsigned long size) __size_overflow(1);
87444+
87445+#ifdef CONFIG_PAX_KERNEXEC
87446+void *module_alloc_exec(unsigned long size);
87447+#else
87448+#define module_alloc_exec(x) module_alloc(x)
87449+#endif
87450
87451 /* Free memory returned from module_alloc. */
87452 void module_free(struct module *mod, void *module_region);
87453
87454+#ifdef CONFIG_PAX_KERNEXEC
87455+void module_free_exec(struct module *mod, void *module_region);
87456+#else
87457+#define module_free_exec(x, y) module_free((x), (y))
87458+#endif
87459+
87460 /* Apply the given relocation to the (simplified) ELF. Return -error
87461 or 0. */
87462 int apply_relocate(Elf_Shdr *sechdrs,
87463diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
87464index 82a9124..8a5f622 100644
87465--- a/include/linux/moduleparam.h
87466+++ b/include/linux/moduleparam.h
87467@@ -132,7 +132,7 @@ struct kparam_array
87468
87469 /* Actually copy string: maxlen param is usually sizeof(string). */
87470 #define module_param_string(name, string, len, perm) \
87471- static const struct kparam_string __param_string_##name \
87472+ static const struct kparam_string __param_string_##name __used \
87473 = { len, string }; \
87474 __module_param_call(MODULE_PARAM_PREFIX, name, \
87475 param_set_copystring, param_get_string, \
87476@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
87477
87478 /* Comma-separated array: *nump is set to number they actually specified. */
87479 #define module_param_array_named(name, array, type, nump, perm) \
87480- static const struct kparam_array __param_arr_##name \
87481+ static const struct kparam_array __param_arr_##name __used \
87482 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
87483 sizeof(array[0]), array }; \
87484 __module_param_call(MODULE_PARAM_PREFIX, name, \
87485diff --git a/include/linux/mutex.h b/include/linux/mutex.h
87486index 878cab4..c92cb3e 100644
87487--- a/include/linux/mutex.h
87488+++ b/include/linux/mutex.h
87489@@ -51,7 +51,7 @@ struct mutex {
87490 spinlock_t wait_lock;
87491 struct list_head wait_list;
87492 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
87493- struct thread_info *owner;
87494+ struct task_struct *owner;
87495 #endif
87496 #ifdef CONFIG_DEBUG_MUTEXES
87497 const char *name;
87498diff --git a/include/linux/namei.h b/include/linux/namei.h
87499index ec0f607..d19e675 100644
87500--- a/include/linux/namei.h
87501+++ b/include/linux/namei.h
87502@@ -22,7 +22,7 @@ struct nameidata {
87503 unsigned int flags;
87504 int last_type;
87505 unsigned depth;
87506- char *saved_names[MAX_NESTED_LINKS + 1];
87507+ const char *saved_names[MAX_NESTED_LINKS + 1];
87508
87509 /* Intent data */
87510 union {
87511@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
87512 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
87513 extern void unlock_rename(struct dentry *, struct dentry *);
87514
87515-static inline void nd_set_link(struct nameidata *nd, char *path)
87516+static inline void nd_set_link(struct nameidata *nd, const char *path)
87517 {
87518 nd->saved_names[nd->depth] = path;
87519 }
87520
87521-static inline char *nd_get_link(struct nameidata *nd)
87522+static inline const char *nd_get_link(const struct nameidata *nd)
87523 {
87524 return nd->saved_names[nd->depth];
87525 }
87526diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
87527index 9d7e8f7..04428c5 100644
87528--- a/include/linux/netdevice.h
87529+++ b/include/linux/netdevice.h
87530@@ -637,6 +637,7 @@ struct net_device_ops {
87531 u16 xid);
87532 #endif
87533 };
87534+typedef struct net_device_ops __no_const net_device_ops_no_const;
87535
87536 /*
87537 * The DEVICE structure.
87538diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
87539new file mode 100644
87540index 0000000..33f4af8
87541--- /dev/null
87542+++ b/include/linux/netfilter/xt_gradm.h
87543@@ -0,0 +1,9 @@
87544+#ifndef _LINUX_NETFILTER_XT_GRADM_H
87545+#define _LINUX_NETFILTER_XT_GRADM_H 1
87546+
87547+struct xt_gradm_mtinfo {
87548+ __u16 flags;
87549+ __u16 invflags;
87550+};
87551+
87552+#endif
87553diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
87554index b359c4a..c08b334 100644
87555--- a/include/linux/nodemask.h
87556+++ b/include/linux/nodemask.h
87557@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
87558
87559 #define any_online_node(mask) \
87560 ({ \
87561- int node; \
87562- for_each_node_mask(node, (mask)) \
87563- if (node_online(node)) \
87564+ int __node; \
87565+ for_each_node_mask(__node, (mask)) \
87566+ if (node_online(__node)) \
87567 break; \
87568- node; \
87569+ __node; \
87570 })
87571
87572 #define num_online_nodes() num_node_state(N_ONLINE)
87573diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
87574index 5171639..81f30d3 100644
87575--- a/include/linux/oprofile.h
87576+++ b/include/linux/oprofile.h
87577@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
87578 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
87579 char const * name, ulong * val);
87580
87581-/** Create a file for read-only access to an atomic_t. */
87582+/** Create a file for read-only access to an atomic_unchecked_t. */
87583 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
87584- char const * name, atomic_t * val);
87585+ char const * name, atomic_unchecked_t * val);
87586
87587 /** create a directory */
87588 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
87589@@ -153,7 +153,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
87590 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
87591 * Returns 0 on success, < 0 on error.
87592 */
87593-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
87594+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
87595
87596 /** lock for read/write safety */
87597 extern spinlock_t oprofilefs_lock;
87598diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
87599index 3c62ed4..8924c7c 100644
87600--- a/include/linux/pagemap.h
87601+++ b/include/linux/pagemap.h
87602@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
87603 if (((unsigned long)uaddr & PAGE_MASK) !=
87604 ((unsigned long)end & PAGE_MASK))
87605 ret = __get_user(c, end);
87606+ (void)c;
87607 }
87608+ (void)c;
87609 return ret;
87610 }
87611
87612diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
87613index 81c9689..a567a55 100644
87614--- a/include/linux/perf_event.h
87615+++ b/include/linux/perf_event.h
87616@@ -476,7 +476,7 @@ struct hw_perf_event {
87617 struct hrtimer hrtimer;
87618 };
87619 };
87620- atomic64_t prev_count;
87621+ atomic64_unchecked_t prev_count;
87622 u64 sample_period;
87623 u64 last_period;
87624 atomic64_t period_left;
87625@@ -557,7 +557,7 @@ struct perf_event {
87626 const struct pmu *pmu;
87627
87628 enum perf_event_active_state state;
87629- atomic64_t count;
87630+ atomic64_unchecked_t count;
87631
87632 /*
87633 * These are the total time in nanoseconds that the event
87634@@ -595,8 +595,8 @@ struct perf_event {
87635 * These accumulate total time (in nanoseconds) that children
87636 * events have been enabled and running, respectively.
87637 */
87638- atomic64_t child_total_time_enabled;
87639- atomic64_t child_total_time_running;
87640+ atomic64_unchecked_t child_total_time_enabled;
87641+ atomic64_unchecked_t child_total_time_running;
87642
87643 /*
87644 * Protect attach/detach and child_list:
87645diff --git a/include/linux/personality.h b/include/linux/personality.h
87646index 1261208..ddef96f 100644
87647--- a/include/linux/personality.h
87648+++ b/include/linux/personality.h
87649@@ -43,6 +43,7 @@ enum {
87650 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87651 ADDR_NO_RANDOMIZE | \
87652 ADDR_COMPAT_LAYOUT | \
87653+ ADDR_LIMIT_3GB | \
87654 MMAP_PAGE_ZERO)
87655
87656 /*
87657diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
87658index b43a9e0..b77d869 100644
87659--- a/include/linux/pipe_fs_i.h
87660+++ b/include/linux/pipe_fs_i.h
87661@@ -46,9 +46,9 @@ struct pipe_inode_info {
87662 wait_queue_head_t wait;
87663 unsigned int nrbufs, curbuf;
87664 struct page *tmp_page;
87665- unsigned int readers;
87666- unsigned int writers;
87667- unsigned int waiting_writers;
87668+ atomic_t readers;
87669+ atomic_t writers;
87670+ atomic_t waiting_writers;
87671 unsigned int r_counter;
87672 unsigned int w_counter;
87673 struct fasync_struct *fasync_readers;
87674diff --git a/include/linux/poison.h b/include/linux/poison.h
87675index 34066ff..e95d744 100644
87676--- a/include/linux/poison.h
87677+++ b/include/linux/poison.h
87678@@ -19,8 +19,8 @@
87679 * under normal circumstances, used to verify that nobody uses
87680 * non-initialized list entries.
87681 */
87682-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
87683-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
87684+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
87685+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
87686
87687 /********** include/linux/timer.h **********/
87688 /*
87689diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
87690index 4f71bf4..cd2f68e 100644
87691--- a/include/linux/posix-timers.h
87692+++ b/include/linux/posix-timers.h
87693@@ -82,7 +82,8 @@ struct k_clock {
87694 #define TIMER_RETRY 1
87695 void (*timer_get) (struct k_itimer * timr,
87696 struct itimerspec * cur_setting);
87697-};
87698+} __do_const;
87699+typedef struct k_clock __no_const k_clock_no_const;
87700
87701 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
87702
87703diff --git a/include/linux/preempt.h b/include/linux/preempt.h
87704index 72b1a10..13303a9 100644
87705--- a/include/linux/preempt.h
87706+++ b/include/linux/preempt.h
87707@@ -110,7 +110,7 @@ struct preempt_ops {
87708 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
87709 void (*sched_out)(struct preempt_notifier *notifier,
87710 struct task_struct *next);
87711-};
87712+} __no_const;
87713
87714 /**
87715 * preempt_notifier - key for installing preemption notifiers
87716diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
87717index af7c36a..a93005c 100644
87718--- a/include/linux/prefetch.h
87719+++ b/include/linux/prefetch.h
87720@@ -11,6 +11,7 @@
87721 #define _LINUX_PREFETCH_H
87722
87723 #include <linux/types.h>
87724+#include <linux/const.h>
87725 #include <asm/processor.h>
87726 #include <asm/cache.h>
87727
87728diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
87729index 379eaed..1bf73e3 100644
87730--- a/include/linux/proc_fs.h
87731+++ b/include/linux/proc_fs.h
87732@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
87733 return proc_create_data(name, mode, parent, proc_fops, NULL);
87734 }
87735
87736+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
87737+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
87738+{
87739+#ifdef CONFIG_GRKERNSEC_PROC_USER
87740+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
87741+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87742+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
87743+#else
87744+ return proc_create_data(name, mode, parent, proc_fops, NULL);
87745+#endif
87746+}
87747+
87748+
87749 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
87750 mode_t mode, struct proc_dir_entry *base,
87751 read_proc_t *read_proc, void * data)
87752@@ -256,7 +269,7 @@ union proc_op {
87753 int (*proc_show)(struct seq_file *m,
87754 struct pid_namespace *ns, struct pid *pid,
87755 struct task_struct *task);
87756-};
87757+} __no_const;
87758
87759 struct ctl_table_header;
87760 struct ctl_table;
87761diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
87762index 7456d7d..6c1cfc9 100644
87763--- a/include/linux/ptrace.h
87764+++ b/include/linux/ptrace.h
87765@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
87766 extern void exit_ptrace(struct task_struct *tracer);
87767 #define PTRACE_MODE_READ 1
87768 #define PTRACE_MODE_ATTACH 2
87769-/* Returns 0 on success, -errno on denial. */
87770-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
87771 /* Returns true on success, false on denial. */
87772 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
87773+/* Returns true on success, false on denial. */
87774+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
87775
87776 static inline int ptrace_reparented(struct task_struct *child)
87777 {
87778diff --git a/include/linux/random.h b/include/linux/random.h
87779index 2948046..3262567 100644
87780--- a/include/linux/random.h
87781+++ b/include/linux/random.h
87782@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
87783 u32 random32(void);
87784 void srandom32(u32 seed);
87785
87786+static inline unsigned long pax_get_random_long(void)
87787+{
87788+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
87789+}
87790+
87791 #endif /* __KERNEL___ */
87792
87793 #endif /* _LINUX_RANDOM_H */
87794diff --git a/include/linux/reboot.h b/include/linux/reboot.h
87795index 988e55f..17cb4ef 100644
87796--- a/include/linux/reboot.h
87797+++ b/include/linux/reboot.h
87798@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
87799 * Architecture-specific implementations of sys_reboot commands.
87800 */
87801
87802-extern void machine_restart(char *cmd);
87803-extern void machine_halt(void);
87804-extern void machine_power_off(void);
87805+extern void machine_restart(char *cmd) __noreturn;
87806+extern void machine_halt(void) __noreturn;
87807+extern void machine_power_off(void) __noreturn;
87808
87809 extern void machine_shutdown(void);
87810 struct pt_regs;
87811@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
87812 */
87813
87814 extern void kernel_restart_prepare(char *cmd);
87815-extern void kernel_restart(char *cmd);
87816-extern void kernel_halt(void);
87817-extern void kernel_power_off(void);
87818+extern void kernel_restart(char *cmd) __noreturn;
87819+extern void kernel_halt(void) __noreturn;
87820+extern void kernel_power_off(void) __noreturn;
87821
87822 void ctrl_alt_del(void);
87823
87824@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
87825 * Emergency restart, callable from an interrupt handler.
87826 */
87827
87828-extern void emergency_restart(void);
87829+extern void emergency_restart(void) __noreturn;
87830 #include <asm/emergency-restart.h>
87831
87832 #endif
87833diff --git a/include/linux/regset.h b/include/linux/regset.h
87834index 8abee65..5150fd1 100644
87835--- a/include/linux/regset.h
87836+++ b/include/linux/regset.h
87837@@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
87838 {
87839 const struct user_regset *regset = &view->regsets[setno];
87840
87841+ if (!regset->get)
87842+ return -EOPNOTSUPP;
87843+
87844 if (!access_ok(VERIFY_WRITE, data, size))
87845 return -EIO;
87846
87847@@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
87848 {
87849 const struct user_regset *regset = &view->regsets[setno];
87850
87851+ if (!regset->set)
87852+ return -EOPNOTSUPP;
87853+
87854 if (!access_ok(VERIFY_READ, data, size))
87855 return -EIO;
87856
87857diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
87858index dd31e7b..5b03c5c 100644
87859--- a/include/linux/reiserfs_fs.h
87860+++ b/include/linux/reiserfs_fs.h
87861@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
87862 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
87863
87864 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
87865-#define get_generation(s) atomic_read (&fs_generation(s))
87866+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
87867 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
87868 #define __fs_changed(gen,s) (gen != get_generation (s))
87869 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
87870@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
87871 */
87872
87873 struct item_operations {
87874- int (*bytes_number) (struct item_head * ih, int block_size);
87875- void (*decrement_key) (struct cpu_key *);
87876- int (*is_left_mergeable) (struct reiserfs_key * ih,
87877+ int (* const bytes_number) (struct item_head * ih, int block_size);
87878+ void (* const decrement_key) (struct cpu_key *);
87879+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
87880 unsigned long bsize);
87881- void (*print_item) (struct item_head *, char *item);
87882- void (*check_item) (struct item_head *, char *item);
87883+ void (* const print_item) (struct item_head *, char *item);
87884+ void (* const check_item) (struct item_head *, char *item);
87885
87886- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87887+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87888 int is_affected, int insert_size);
87889- int (*check_left) (struct virtual_item * vi, int free,
87890+ int (* const check_left) (struct virtual_item * vi, int free,
87891 int start_skip, int end_skip);
87892- int (*check_right) (struct virtual_item * vi, int free);
87893- int (*part_size) (struct virtual_item * vi, int from, int to);
87894- int (*unit_num) (struct virtual_item * vi);
87895- void (*print_vi) (struct virtual_item * vi);
87896+ int (* const check_right) (struct virtual_item * vi, int free);
87897+ int (* const part_size) (struct virtual_item * vi, int from, int to);
87898+ int (* const unit_num) (struct virtual_item * vi);
87899+ void (* const print_vi) (struct virtual_item * vi);
87900 };
87901
87902-extern struct item_operations *item_ops[TYPE_ANY + 1];
87903+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
87904
87905 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
87906 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
87907diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
87908index dab68bb..0688727 100644
87909--- a/include/linux/reiserfs_fs_sb.h
87910+++ b/include/linux/reiserfs_fs_sb.h
87911@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
87912 /* Comment? -Hans */
87913 wait_queue_head_t s_wait;
87914 /* To be obsoleted soon by per buffer seals.. -Hans */
87915- atomic_t s_generation_counter; // increased by one every time the
87916+ atomic_unchecked_t s_generation_counter; // increased by one every time the
87917 // tree gets re-balanced
87918 unsigned long s_properties; /* File system properties. Currently holds
87919 on-disk FS format */
87920diff --git a/include/linux/relay.h b/include/linux/relay.h
87921index 14a86bc..17d0700 100644
87922--- a/include/linux/relay.h
87923+++ b/include/linux/relay.h
87924@@ -159,7 +159,7 @@ struct rchan_callbacks
87925 * The callback should return 0 if successful, negative if not.
87926 */
87927 int (*remove_buf_file)(struct dentry *dentry);
87928-};
87929+} __no_const;
87930
87931 /*
87932 * CONFIG_RELAY kernel API, kernel/relay.c
87933diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
87934index 3392c59..a746428 100644
87935--- a/include/linux/rfkill.h
87936+++ b/include/linux/rfkill.h
87937@@ -144,6 +144,7 @@ struct rfkill_ops {
87938 void (*query)(struct rfkill *rfkill, void *data);
87939 int (*set_block)(void *data, bool blocked);
87940 };
87941+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
87942
87943 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
87944 /**
87945diff --git a/include/linux/sched.h b/include/linux/sched.h
87946index 71849bf..8cf9dd2 100644
87947--- a/include/linux/sched.h
87948+++ b/include/linux/sched.h
87949@@ -101,6 +101,7 @@ struct bio;
87950 struct fs_struct;
87951 struct bts_context;
87952 struct perf_event_context;
87953+struct linux_binprm;
87954
87955 /*
87956 * List of flags we want to share for kernel threads,
87957@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
87958 extern signed long schedule_timeout_uninterruptible(signed long timeout);
87959 asmlinkage void __schedule(void);
87960 asmlinkage void schedule(void);
87961-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
87962+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
87963
87964 struct nsproxy;
87965 struct user_namespace;
87966@@ -371,9 +372,12 @@ struct user_namespace;
87967 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
87968
87969 extern int sysctl_max_map_count;
87970+extern unsigned long sysctl_heap_stack_gap;
87971
87972 #include <linux/aio.h>
87973
87974+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
87975+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
87976 extern unsigned long
87977 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
87978 unsigned long, unsigned long);
87979@@ -666,6 +670,16 @@ struct signal_struct {
87980 struct tty_audit_buf *tty_audit_buf;
87981 #endif
87982
87983+#ifdef CONFIG_GRKERNSEC
87984+ u32 curr_ip;
87985+ u32 saved_ip;
87986+ u32 gr_saddr;
87987+ u32 gr_daddr;
87988+ u16 gr_sport;
87989+ u16 gr_dport;
87990+ u8 used_accept:1;
87991+#endif
87992+
87993 int oom_adj; /* OOM kill score adjustment (bit shift) */
87994 };
87995
87996@@ -723,6 +737,11 @@ struct user_struct {
87997 struct key *session_keyring; /* UID's default session keyring */
87998 #endif
87999
88000+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
88001+ unsigned int banned;
88002+ unsigned long ban_expires;
88003+#endif
88004+
88005 /* Hash table maintenance information */
88006 struct hlist_node uidhash_node;
88007 uid_t uid;
88008@@ -1328,8 +1347,8 @@ struct task_struct {
88009 struct list_head thread_group;
88010
88011 struct completion *vfork_done; /* for vfork() */
88012- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
88013- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
88014+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
88015+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
88016
88017 cputime_t utime, stime, utimescaled, stimescaled;
88018 cputime_t gtime;
88019@@ -1343,16 +1362,6 @@ struct task_struct {
88020 struct task_cputime cputime_expires;
88021 struct list_head cpu_timers[3];
88022
88023-/* process credentials */
88024- const struct cred *real_cred; /* objective and real subjective task
88025- * credentials (COW) */
88026- const struct cred *cred; /* effective (overridable) subjective task
88027- * credentials (COW) */
88028- struct mutex cred_guard_mutex; /* guard against foreign influences on
88029- * credential calculations
88030- * (notably. ptrace) */
88031- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
88032-
88033 char comm[TASK_COMM_LEN]; /* executable name excluding path
88034 - access with [gs]et_task_comm (which lock
88035 it with task_lock())
88036@@ -1369,6 +1378,10 @@ struct task_struct {
88037 #endif
88038 /* CPU-specific state of this task */
88039 struct thread_struct thread;
88040+/* thread_info moved to task_struct */
88041+#ifdef CONFIG_X86
88042+ struct thread_info tinfo;
88043+#endif
88044 /* filesystem information */
88045 struct fs_struct *fs;
88046 /* open file information */
88047@@ -1436,6 +1449,15 @@ struct task_struct {
88048 int hardirq_context;
88049 int softirq_context;
88050 #endif
88051+
88052+/* process credentials */
88053+ const struct cred *real_cred; /* objective and real subjective task
88054+ * credentials (COW) */
88055+ struct mutex cred_guard_mutex; /* guard against foreign influences on
88056+ * credential calculations
88057+ * (notably. ptrace) */
88058+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
88059+
88060 #ifdef CONFIG_LOCKDEP
88061 # define MAX_LOCK_DEPTH 48UL
88062 u64 curr_chain_key;
88063@@ -1456,6 +1478,9 @@ struct task_struct {
88064
88065 struct backing_dev_info *backing_dev_info;
88066
88067+ const struct cred *cred; /* effective (overridable) subjective task
88068+ * credentials (COW) */
88069+
88070 struct io_context *io_context;
88071
88072 unsigned long ptrace_message;
88073@@ -1519,6 +1544,27 @@ struct task_struct {
88074 unsigned long default_timer_slack_ns;
88075
88076 struct list_head *scm_work_list;
88077+
88078+#ifdef CONFIG_GRKERNSEC
88079+ /* grsecurity */
88080+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88081+ u64 exec_id;
88082+#endif
88083+#ifdef CONFIG_GRKERNSEC_SETXID
88084+ const struct cred *delayed_cred;
88085+#endif
88086+ struct dentry *gr_chroot_dentry;
88087+ struct acl_subject_label *acl;
88088+ struct acl_role_label *role;
88089+ struct file *exec_file;
88090+ u16 acl_role_id;
88091+ /* is this the task that authenticated to the special role */
88092+ u8 acl_sp_role;
88093+ u8 is_writable;
88094+ u8 brute;
88095+ u8 gr_is_chrooted;
88096+#endif
88097+
88098 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
88099 /* Index of current stored adress in ret_stack */
88100 int curr_ret_stack;
88101@@ -1542,6 +1588,57 @@ struct task_struct {
88102 #endif /* CONFIG_TRACING */
88103 };
88104
88105+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
88106+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
88107+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
88108+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
88109+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
88110+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
88111+
88112+#ifdef CONFIG_PAX_SOFTMODE
88113+extern int pax_softmode;
88114+#endif
88115+
88116+extern int pax_check_flags(unsigned long *);
88117+
88118+/* if tsk != current then task_lock must be held on it */
88119+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
88120+static inline unsigned long pax_get_flags(struct task_struct *tsk)
88121+{
88122+ if (likely(tsk->mm))
88123+ return tsk->mm->pax_flags;
88124+ else
88125+ return 0UL;
88126+}
88127+
88128+/* if tsk != current then task_lock must be held on it */
88129+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
88130+{
88131+ if (likely(tsk->mm)) {
88132+ tsk->mm->pax_flags = flags;
88133+ return 0;
88134+ }
88135+ return -EINVAL;
88136+}
88137+#endif
88138+
88139+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
88140+extern void pax_set_initial_flags(struct linux_binprm *bprm);
88141+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
88142+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
88143+#endif
88144+
88145+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
88146+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
88147+extern void pax_report_refcount_overflow(struct pt_regs *regs);
88148+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
88149+
88150+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
88151+extern void pax_track_stack(void);
88152+#else
88153+static inline void pax_track_stack(void) {}
88154+#endif
88155+
88156 /* Future-safe accessor for struct task_struct's cpus_allowed. */
88157 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
88158
88159@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
88160 #define PF_DUMPCORE 0x00000200 /* dumped core */
88161 #define PF_SIGNALED 0x00000400 /* killed by a signal */
88162 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
88163-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
88164+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
88165 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
88166 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
88167 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
88168@@ -1978,7 +2075,9 @@ void yield(void);
88169 extern struct exec_domain default_exec_domain;
88170
88171 union thread_union {
88172+#ifndef CONFIG_X86
88173 struct thread_info thread_info;
88174+#endif
88175 unsigned long stack[THREAD_SIZE/sizeof(long)];
88176 };
88177
88178@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
88179 */
88180
88181 extern struct task_struct *find_task_by_vpid(pid_t nr);
88182+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
88183 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
88184 struct pid_namespace *ns);
88185
88186@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
88187 extern void exit_itimers(struct signal_struct *);
88188 extern void flush_itimer_signals(void);
88189
88190-extern NORET_TYPE void do_group_exit(int);
88191+extern __noreturn void do_group_exit(int);
88192
88193 extern void daemonize(const char *, ...);
88194 extern int allow_signal(int);
88195@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
88196
88197 #endif
88198
88199-static inline int object_is_on_stack(void *obj)
88200+static inline int object_starts_on_stack(void *obj)
88201 {
88202- void *stack = task_stack_page(current);
88203+ const void *stack = task_stack_page(current);
88204
88205 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
88206 }
88207
88208+#ifdef CONFIG_PAX_USERCOPY
88209+extern int object_is_on_stack(const void *obj, unsigned long len);
88210+#endif
88211+
88212 extern void thread_info_cache_init(void);
88213
88214 #ifdef CONFIG_DEBUG_STACK_USAGE
88215@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
88216 return task_rlimit_max(current, limit);
88217 }
88218
88219+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88220+DECLARE_PER_CPU(u64, exec_counter);
88221+static inline void increment_exec_counter(void)
88222+{
88223+ unsigned int cpu;
88224+ u64 *exec_id_ptr;
88225+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
88226+ cpu = get_cpu();
88227+ exec_id_ptr = &per_cpu(exec_counter, cpu);
88228+ *exec_id_ptr += 1ULL << 16;
88229+ current->exec_id = *exec_id_ptr;
88230+ put_cpu();
88231+}
88232+#else
88233+static inline void increment_exec_counter(void) {}
88234+#endif
88235+
88236 #endif /* __KERNEL__ */
88237
88238 #endif
88239diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
88240index 1ee2c05..81b7ec4 100644
88241--- a/include/linux/screen_info.h
88242+++ b/include/linux/screen_info.h
88243@@ -42,7 +42,8 @@ struct screen_info {
88244 __u16 pages; /* 0x32 */
88245 __u16 vesa_attributes; /* 0x34 */
88246 __u32 capabilities; /* 0x36 */
88247- __u8 _reserved[6]; /* 0x3a */
88248+ __u16 vesapm_size; /* 0x3a */
88249+ __u8 _reserved[4]; /* 0x3c */
88250 } __attribute__((packed));
88251
88252 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88253diff --git a/include/linux/security.h b/include/linux/security.h
88254index d40d23f..d739b08 100644
88255--- a/include/linux/security.h
88256+++ b/include/linux/security.h
88257@@ -34,6 +34,7 @@
88258 #include <linux/key.h>
88259 #include <linux/xfrm.h>
88260 #include <linux/gfp.h>
88261+#include <linux/grsecurity.h>
88262 #include <net/flow.h>
88263
88264 /* Maximum number of letters for an LSM name string */
88265@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
88266 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
88267 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
88268 extern int cap_task_setnice(struct task_struct *p, int nice);
88269-extern int cap_syslog(int type);
88270+extern int cap_syslog(int type, bool from_file);
88271 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
88272
88273 struct msghdr;
88274@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
88275 * logging to the console.
88276 * See the syslog(2) manual page for an explanation of the @type values.
88277 * @type contains the type of action.
88278+ * @from_file indicates the context of action (if it came from /proc).
88279 * Return 0 if permission is granted.
88280 * @settime:
88281 * Check permission to change the system time.
88282@@ -1445,7 +1447,7 @@ struct security_operations {
88283 int (*sysctl) (struct ctl_table *table, int op);
88284 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
88285 int (*quota_on) (struct dentry *dentry);
88286- int (*syslog) (int type);
88287+ int (*syslog) (int type, bool from_file);
88288 int (*settime) (struct timespec *ts, struct timezone *tz);
88289 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
88290
88291@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
88292 int security_sysctl(struct ctl_table *table, int op);
88293 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
88294 int security_quota_on(struct dentry *dentry);
88295-int security_syslog(int type);
88296+int security_syslog(int type, bool from_file);
88297 int security_settime(struct timespec *ts, struct timezone *tz);
88298 int security_vm_enough_memory(long pages);
88299 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
88300@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
88301 return 0;
88302 }
88303
88304-static inline int security_syslog(int type)
88305+static inline int security_syslog(int type, bool from_file)
88306 {
88307- return cap_syslog(type);
88308+ return cap_syslog(type, from_file);
88309 }
88310
88311 static inline int security_settime(struct timespec *ts, struct timezone *tz)
88312diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
88313index 8366d8f..cc5f9d6 100644
88314--- a/include/linux/seq_file.h
88315+++ b/include/linux/seq_file.h
88316@@ -23,6 +23,9 @@ struct seq_file {
88317 u64 version;
88318 struct mutex lock;
88319 const struct seq_operations *op;
88320+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88321+ u64 exec_id;
88322+#endif
88323 void *private;
88324 };
88325
88326@@ -32,6 +35,7 @@ struct seq_operations {
88327 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
88328 int (*show) (struct seq_file *m, void *v);
88329 };
88330+typedef struct seq_operations __no_const seq_operations_no_const;
88331
88332 #define SEQ_SKIP 1
88333
88334diff --git a/include/linux/shm.h b/include/linux/shm.h
88335index eca6235..c7417ed 100644
88336--- a/include/linux/shm.h
88337+++ b/include/linux/shm.h
88338@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
88339 pid_t shm_cprid;
88340 pid_t shm_lprid;
88341 struct user_struct *mlock_user;
88342+#ifdef CONFIG_GRKERNSEC
88343+ time_t shm_createtime;
88344+ pid_t shm_lapid;
88345+#endif
88346 };
88347
88348 /* shm_mode upper byte flags */
88349diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
88350index bcdd660..fd2e332 100644
88351--- a/include/linux/skbuff.h
88352+++ b/include/linux/skbuff.h
88353@@ -14,6 +14,7 @@
88354 #ifndef _LINUX_SKBUFF_H
88355 #define _LINUX_SKBUFF_H
88356
88357+#include <linux/const.h>
88358 #include <linux/kernel.h>
88359 #include <linux/kmemcheck.h>
88360 #include <linux/compiler.h>
88361@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
88362 */
88363 static inline int skb_queue_empty(const struct sk_buff_head *list)
88364 {
88365- return list->next == (struct sk_buff *)list;
88366+ return list->next == (const struct sk_buff *)list;
88367 }
88368
88369 /**
88370@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
88371 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88372 const struct sk_buff *skb)
88373 {
88374- return (skb->next == (struct sk_buff *) list);
88375+ return (skb->next == (const struct sk_buff *) list);
88376 }
88377
88378 /**
88379@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88380 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
88381 const struct sk_buff *skb)
88382 {
88383- return (skb->prev == (struct sk_buff *) list);
88384+ return (skb->prev == (const struct sk_buff *) list);
88385 }
88386
88387 /**
88388@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
88389 * headroom, you should not reduce this.
88390 */
88391 #ifndef NET_SKB_PAD
88392-#define NET_SKB_PAD 32
88393+#define NET_SKB_PAD (_AC(32,UL))
88394 #endif
88395
88396 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
88397@@ -1489,6 +1490,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
88398 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
88399 }
88400
88401+static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
88402+ unsigned int length, gfp_t gfp)
88403+{
88404+ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
88405+
88406+ if (NET_IP_ALIGN && skb)
88407+ skb_reserve(skb, NET_IP_ALIGN);
88408+ return skb;
88409+}
88410+
88411+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
88412+ unsigned int length)
88413+{
88414+ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
88415+}
88416+
88417 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
88418
88419 /**
88420diff --git a/include/linux/slab.h b/include/linux/slab.h
88421index 2da8372..9e01add 100644
88422--- a/include/linux/slab.h
88423+++ b/include/linux/slab.h
88424@@ -11,12 +11,20 @@
88425
88426 #include <linux/gfp.h>
88427 #include <linux/types.h>
88428+#include <linux/err.h>
88429
88430 /*
88431 * Flags to pass to kmem_cache_create().
88432 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
88433 */
88434 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
88435+
88436+#ifdef CONFIG_PAX_USERCOPY
88437+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
88438+#else
88439+#define SLAB_USERCOPY 0x00000000UL
88440+#endif
88441+
88442 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
88443 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
88444 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
88445@@ -82,10 +90,13 @@
88446 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
88447 * Both make kfree a no-op.
88448 */
88449-#define ZERO_SIZE_PTR ((void *)16)
88450+#define ZERO_SIZE_PTR \
88451+({ \
88452+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
88453+ (void *)(-MAX_ERRNO-1L); \
88454+})
88455
88456-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
88457- (unsigned long)ZERO_SIZE_PTR)
88458+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
88459
88460 /*
88461 * struct kmem_cache related prototypes
88462@@ -133,11 +144,12 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
88463 /*
88464 * Common kmalloc functions provided by all allocators
88465 */
88466-void * __must_check __krealloc(const void *, size_t, gfp_t);
88467-void * __must_check krealloc(const void *, size_t, gfp_t);
88468+void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88469+void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88470 void kfree(const void *);
88471 void kzfree(const void *);
88472 size_t ksize(const void *);
88473+void check_object_size(const void *ptr, unsigned long n, bool to);
88474
88475 /*
88476 * Allocator specific definitions. These are mainly used to establish optimized
88477@@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
88478 * request comes from.
88479 */
88480 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88481-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88482+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
88483 #define kmalloc_track_caller(size, flags) \
88484 __kmalloc_track_caller(size, flags, _RET_IP_)
88485 #else
88486@@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88487 * allocation request comes from.
88488 */
88489 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88490-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
88491+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
88492 #define kmalloc_node_track_caller(size, flags, node) \
88493 __kmalloc_node_track_caller(size, flags, node, \
88494 _RET_IP_)
88495diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
88496index 850d057..33bad48 100644
88497--- a/include/linux/slab_def.h
88498+++ b/include/linux/slab_def.h
88499@@ -69,10 +69,10 @@ struct kmem_cache {
88500 unsigned long node_allocs;
88501 unsigned long node_frees;
88502 unsigned long node_overflow;
88503- atomic_t allochit;
88504- atomic_t allocmiss;
88505- atomic_t freehit;
88506- atomic_t freemiss;
88507+ atomic_unchecked_t allochit;
88508+ atomic_unchecked_t allocmiss;
88509+ atomic_unchecked_t freehit;
88510+ atomic_unchecked_t freemiss;
88511
88512 /*
88513 * If debugging is enabled, then the allocator can add additional
88514@@ -108,7 +108,7 @@ struct cache_sizes {
88515 extern struct cache_sizes malloc_sizes[];
88516
88517 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88518-void *__kmalloc(size_t size, gfp_t flags);
88519+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88520
88521 #ifdef CONFIG_KMEMTRACE
88522 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
88523@@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
88524 }
88525 #endif
88526
88527+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88528 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88529 {
88530 struct kmem_cache *cachep;
88531@@ -163,7 +164,7 @@ found:
88532 }
88533
88534 #ifdef CONFIG_NUMA
88535-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
88536+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88537 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88538
88539 #ifdef CONFIG_KMEMTRACE
88540@@ -180,6 +181,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
88541 }
88542 #endif
88543
88544+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88545 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88546 {
88547 struct kmem_cache *cachep;
88548diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
88549index 0ec00b3..65e7e0e 100644
88550--- a/include/linux/slob_def.h
88551+++ b/include/linux/slob_def.h
88552@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
88553 return kmem_cache_alloc_node(cachep, flags, -1);
88554 }
88555
88556-void *__kmalloc_node(size_t size, gfp_t flags, int node);
88557+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88558
88559+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88560 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88561 {
88562 return __kmalloc_node(size, flags, node);
88563@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88564 * kmalloc is the normal method of allocating memory
88565 * in the kernel.
88566 */
88567+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88568 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88569 {
88570 return __kmalloc_node(size, flags, -1);
88571 }
88572
88573+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88574 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
88575 {
88576 return kmalloc(size, flags);
88577diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
88578index 5ad70a6..8f0e2c8 100644
88579--- a/include/linux/slub_def.h
88580+++ b/include/linux/slub_def.h
88581@@ -86,7 +86,7 @@ struct kmem_cache {
88582 struct kmem_cache_order_objects max;
88583 struct kmem_cache_order_objects min;
88584 gfp_t allocflags; /* gfp flags to use on each alloc */
88585- int refcount; /* Refcount for slab cache destroy */
88586+ atomic_t refcount; /* Refcount for slab cache destroy */
88587 void (*ctor)(void *);
88588 int inuse; /* Offset to metadata */
88589 int align; /* Alignment */
88590@@ -197,6 +197,7 @@ static __always_inline int kmalloc_index(size_t size)
88591 * This ought to end up with a global pointer to the right cache
88592 * in kmalloc_caches.
88593 */
88594+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
88595 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88596 {
88597 int index = kmalloc_index(size);
88598@@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88599 #endif
88600
88601 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88602-void *__kmalloc(size_t size, gfp_t flags);
88603+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
88604
88605 #ifdef CONFIG_KMEMTRACE
88606 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
88607@@ -227,6 +228,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
88608 }
88609 #endif
88610
88611+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
88612 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88613 {
88614 unsigned int order = get_order(size);
88615@@ -238,6 +240,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88616 return ret;
88617 }
88618
88619+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88620 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88621 {
88622 void *ret;
88623@@ -263,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
88624 }
88625
88626 #ifdef CONFIG_NUMA
88627-void *__kmalloc_node(size_t size, gfp_t flags, int node);
88628+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88629 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88630
88631 #ifdef CONFIG_KMEMTRACE
88632@@ -280,6 +283,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
88633 }
88634 #endif
88635
88636+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88637 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88638 {
88639 void *ret;
88640diff --git a/include/linux/sonet.h b/include/linux/sonet.h
88641index 67ad11f..0bbd8af 100644
88642--- a/include/linux/sonet.h
88643+++ b/include/linux/sonet.h
88644@@ -61,7 +61,7 @@ struct sonet_stats {
88645 #include <asm/atomic.h>
88646
88647 struct k_sonet_stats {
88648-#define __HANDLE_ITEM(i) atomic_t i
88649+#define __HANDLE_ITEM(i) atomic_unchecked_t i
88650 __SONET_ITEMS
88651 #undef __HANDLE_ITEM
88652 };
88653diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
88654index 6f52b4d..5500323 100644
88655--- a/include/linux/sunrpc/cache.h
88656+++ b/include/linux/sunrpc/cache.h
88657@@ -125,7 +125,7 @@ struct cache_detail {
88658 */
88659 struct cache_req {
88660 struct cache_deferred_req *(*defer)(struct cache_req *req);
88661-};
88662+} __no_const;
88663 /* this must be embedded in a deferred_request that is being
88664 * delayed awaiting cache-fill
88665 */
88666diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
88667index 8ed9642..101ceab 100644
88668--- a/include/linux/sunrpc/clnt.h
88669+++ b/include/linux/sunrpc/clnt.h
88670@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
88671 {
88672 switch (sap->sa_family) {
88673 case AF_INET:
88674- return ntohs(((struct sockaddr_in *)sap)->sin_port);
88675+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
88676 case AF_INET6:
88677- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
88678+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
88679 }
88680 return 0;
88681 }
88682@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
88683 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
88684 const struct sockaddr *src)
88685 {
88686- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
88687+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
88688 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
88689
88690 dsin->sin_family = ssin->sin_family;
88691@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
88692 if (sa->sa_family != AF_INET6)
88693 return 0;
88694
88695- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
88696+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
88697 }
88698
88699 #endif /* __KERNEL__ */
88700diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
88701index c14fe86..393245e 100644
88702--- a/include/linux/sunrpc/svc_rdma.h
88703+++ b/include/linux/sunrpc/svc_rdma.h
88704@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
88705 extern unsigned int svcrdma_max_requests;
88706 extern unsigned int svcrdma_max_req_size;
88707
88708-extern atomic_t rdma_stat_recv;
88709-extern atomic_t rdma_stat_read;
88710-extern atomic_t rdma_stat_write;
88711-extern atomic_t rdma_stat_sq_starve;
88712-extern atomic_t rdma_stat_rq_starve;
88713-extern atomic_t rdma_stat_rq_poll;
88714-extern atomic_t rdma_stat_rq_prod;
88715-extern atomic_t rdma_stat_sq_poll;
88716-extern atomic_t rdma_stat_sq_prod;
88717+extern atomic_unchecked_t rdma_stat_recv;
88718+extern atomic_unchecked_t rdma_stat_read;
88719+extern atomic_unchecked_t rdma_stat_write;
88720+extern atomic_unchecked_t rdma_stat_sq_starve;
88721+extern atomic_unchecked_t rdma_stat_rq_starve;
88722+extern atomic_unchecked_t rdma_stat_rq_poll;
88723+extern atomic_unchecked_t rdma_stat_rq_prod;
88724+extern atomic_unchecked_t rdma_stat_sq_poll;
88725+extern atomic_unchecked_t rdma_stat_sq_prod;
88726
88727 #define RPCRDMA_VERSION 1
88728
88729diff --git a/include/linux/suspend.h b/include/linux/suspend.h
88730index 5e781d8..1e62818 100644
88731--- a/include/linux/suspend.h
88732+++ b/include/linux/suspend.h
88733@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
88734 * which require special recovery actions in that situation.
88735 */
88736 struct platform_suspend_ops {
88737- int (*valid)(suspend_state_t state);
88738- int (*begin)(suspend_state_t state);
88739- int (*prepare)(void);
88740- int (*prepare_late)(void);
88741- int (*enter)(suspend_state_t state);
88742- void (*wake)(void);
88743- void (*finish)(void);
88744- void (*end)(void);
88745- void (*recover)(void);
88746+ int (* const valid)(suspend_state_t state);
88747+ int (* const begin)(suspend_state_t state);
88748+ int (* const prepare)(void);
88749+ int (* const prepare_late)(void);
88750+ int (* const enter)(suspend_state_t state);
88751+ void (* const wake)(void);
88752+ void (* const finish)(void);
88753+ void (* const end)(void);
88754+ void (* const recover)(void);
88755 };
88756
88757 #ifdef CONFIG_SUSPEND
88758@@ -120,7 +120,7 @@ struct platform_suspend_ops {
88759 * suspend_set_ops - set platform dependent suspend operations
88760 * @ops: The new suspend operations to set.
88761 */
88762-extern void suspend_set_ops(struct platform_suspend_ops *ops);
88763+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
88764 extern int suspend_valid_only_mem(suspend_state_t state);
88765
88766 /**
88767@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
88768 #else /* !CONFIG_SUSPEND */
88769 #define suspend_valid_only_mem NULL
88770
88771-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
88772+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
88773 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
88774 #endif /* !CONFIG_SUSPEND */
88775
88776@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
88777 * platforms which require special recovery actions in that situation.
88778 */
88779 struct platform_hibernation_ops {
88780- int (*begin)(void);
88781- void (*end)(void);
88782- int (*pre_snapshot)(void);
88783- void (*finish)(void);
88784- int (*prepare)(void);
88785- int (*enter)(void);
88786- void (*leave)(void);
88787- int (*pre_restore)(void);
88788- void (*restore_cleanup)(void);
88789- void (*recover)(void);
88790+ int (* const begin)(void);
88791+ void (* const end)(void);
88792+ int (* const pre_snapshot)(void);
88793+ void (* const finish)(void);
88794+ int (* const prepare)(void);
88795+ int (* const enter)(void);
88796+ void (* const leave)(void);
88797+ int (* const pre_restore)(void);
88798+ void (* const restore_cleanup)(void);
88799+ void (* const recover)(void);
88800 };
88801
88802 #ifdef CONFIG_HIBERNATION
88803@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
88804 extern void swsusp_unset_page_free(struct page *);
88805 extern unsigned long get_safe_page(gfp_t gfp_mask);
88806
88807-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
88808+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
88809 extern int hibernate(void);
88810 extern bool system_entering_hibernation(void);
88811 #else /* CONFIG_HIBERNATION */
88812@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
88813 static inline void swsusp_set_page_free(struct page *p) {}
88814 static inline void swsusp_unset_page_free(struct page *p) {}
88815
88816-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
88817+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
88818 static inline int hibernate(void) { return -ENOSYS; }
88819 static inline bool system_entering_hibernation(void) { return false; }
88820 #endif /* CONFIG_HIBERNATION */
88821diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
88822index 0eb6942..a805cb6 100644
88823--- a/include/linux/sysctl.h
88824+++ b/include/linux/sysctl.h
88825@@ -164,7 +164,11 @@ enum
88826 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88827 };
88828
88829-
88830+#ifdef CONFIG_PAX_SOFTMODE
88831+enum {
88832+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
88833+};
88834+#endif
88835
88836 /* CTL_VM names: */
88837 enum
88838@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
88839
88840 extern int proc_dostring(struct ctl_table *, int,
88841 void __user *, size_t *, loff_t *);
88842+extern int proc_dostring_modpriv(struct ctl_table *, int,
88843+ void __user *, size_t *, loff_t *);
88844 extern int proc_dointvec(struct ctl_table *, int,
88845 void __user *, size_t *, loff_t *);
88846 extern int proc_dointvec_minmax(struct ctl_table *, int,
88847@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
88848
88849 extern ctl_handler sysctl_data;
88850 extern ctl_handler sysctl_string;
88851+extern ctl_handler sysctl_string_modpriv;
88852 extern ctl_handler sysctl_intvec;
88853 extern ctl_handler sysctl_jiffies;
88854 extern ctl_handler sysctl_ms_jiffies;
88855diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
88856index 9d68fed..71f02cc 100644
88857--- a/include/linux/sysfs.h
88858+++ b/include/linux/sysfs.h
88859@@ -75,8 +75,8 @@ struct bin_attribute {
88860 };
88861
88862 struct sysfs_ops {
88863- ssize_t (*show)(struct kobject *, struct attribute *,char *);
88864- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
88865+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
88866+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
88867 };
88868
88869 struct sysfs_dirent;
88870diff --git a/include/linux/syslog.h b/include/linux/syslog.h
88871new file mode 100644
88872index 0000000..3891139
88873--- /dev/null
88874+++ b/include/linux/syslog.h
88875@@ -0,0 +1,52 @@
88876+/* Syslog internals
88877+ *
88878+ * Copyright 2010 Canonical, Ltd.
88879+ * Author: Kees Cook <kees.cook@canonical.com>
88880+ *
88881+ * This program is free software; you can redistribute it and/or modify
88882+ * it under the terms of the GNU General Public License as published by
88883+ * the Free Software Foundation; either version 2, or (at your option)
88884+ * any later version.
88885+ *
88886+ * This program is distributed in the hope that it will be useful,
88887+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
88888+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
88889+ * GNU General Public License for more details.
88890+ *
88891+ * You should have received a copy of the GNU General Public License
88892+ * along with this program; see the file COPYING. If not, write to
88893+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
88894+ */
88895+
88896+#ifndef _LINUX_SYSLOG_H
88897+#define _LINUX_SYSLOG_H
88898+
88899+/* Close the log. Currently a NOP. */
88900+#define SYSLOG_ACTION_CLOSE 0
88901+/* Open the log. Currently a NOP. */
88902+#define SYSLOG_ACTION_OPEN 1
88903+/* Read from the log. */
88904+#define SYSLOG_ACTION_READ 2
88905+/* Read all messages remaining in the ring buffer. */
88906+#define SYSLOG_ACTION_READ_ALL 3
88907+/* Read and clear all messages remaining in the ring buffer */
88908+#define SYSLOG_ACTION_READ_CLEAR 4
88909+/* Clear ring buffer. */
88910+#define SYSLOG_ACTION_CLEAR 5
88911+/* Disable printk's to console */
88912+#define SYSLOG_ACTION_CONSOLE_OFF 6
88913+/* Enable printk's to console */
88914+#define SYSLOG_ACTION_CONSOLE_ON 7
88915+/* Set level of messages printed to console */
88916+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
88917+/* Return number of unread characters in the log buffer */
88918+#define SYSLOG_ACTION_SIZE_UNREAD 9
88919+/* Return size of the log buffer */
88920+#define SYSLOG_ACTION_SIZE_BUFFER 10
88921+
88922+#define SYSLOG_FROM_CALL 0
88923+#define SYSLOG_FROM_FILE 1
88924+
88925+int do_syslog(int type, char __user *buf, int count, bool from_file);
88926+
88927+#endif /* _LINUX_SYSLOG_H */
88928diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
88929index a8cc4e1..98d3b85 100644
88930--- a/include/linux/thread_info.h
88931+++ b/include/linux/thread_info.h
88932@@ -23,7 +23,7 @@ struct restart_block {
88933 };
88934 /* For futex_wait and futex_wait_requeue_pi */
88935 struct {
88936- u32 *uaddr;
88937+ u32 __user *uaddr;
88938 u32 val;
88939 u32 flags;
88940 u32 bitset;
88941diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
88942index 1eb44a9..f582df3 100644
88943--- a/include/linux/tracehook.h
88944+++ b/include/linux/tracehook.h
88945@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
88946 /*
88947 * ptrace report for syscall entry and exit looks identical.
88948 */
88949-static inline void ptrace_report_syscall(struct pt_regs *regs)
88950+static inline int ptrace_report_syscall(struct pt_regs *regs)
88951 {
88952 int ptrace = task_ptrace(current);
88953
88954 if (!(ptrace & PT_PTRACED))
88955- return;
88956+ return 0;
88957
88958 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
88959
88960@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88961 send_sig(current->exit_code, current, 1);
88962 current->exit_code = 0;
88963 }
88964+
88965+ return fatal_signal_pending(current);
88966 }
88967
88968 /**
88969@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88970 static inline __must_check int tracehook_report_syscall_entry(
88971 struct pt_regs *regs)
88972 {
88973- ptrace_report_syscall(regs);
88974- return 0;
88975+ return ptrace_report_syscall(regs);
88976 }
88977
88978 /**
88979diff --git a/include/linux/tty.h b/include/linux/tty.h
88980index e9c57e9..ee6d489 100644
88981--- a/include/linux/tty.h
88982+++ b/include/linux/tty.h
88983@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
88984 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
88985 extern void tty_ldisc_enable(struct tty_struct *tty);
88986
88987-
88988 /* n_tty.c */
88989 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
88990
88991diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
88992index 0c4ee9b..9f7c426 100644
88993--- a/include/linux/tty_ldisc.h
88994+++ b/include/linux/tty_ldisc.h
88995@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
88996
88997 struct module *owner;
88998
88999- int refcount;
89000+ atomic_t refcount;
89001 };
89002
89003 struct tty_ldisc {
89004diff --git a/include/linux/types.h b/include/linux/types.h
89005index c42724f..d190eee 100644
89006--- a/include/linux/types.h
89007+++ b/include/linux/types.h
89008@@ -191,10 +191,26 @@ typedef struct {
89009 volatile int counter;
89010 } atomic_t;
89011
89012+#ifdef CONFIG_PAX_REFCOUNT
89013+typedef struct {
89014+ volatile int counter;
89015+} atomic_unchecked_t;
89016+#else
89017+typedef atomic_t atomic_unchecked_t;
89018+#endif
89019+
89020 #ifdef CONFIG_64BIT
89021 typedef struct {
89022 volatile long counter;
89023 } atomic64_t;
89024+
89025+#ifdef CONFIG_PAX_REFCOUNT
89026+typedef struct {
89027+ volatile long counter;
89028+} atomic64_unchecked_t;
89029+#else
89030+typedef atomic64_t atomic64_unchecked_t;
89031+#endif
89032 #endif
89033
89034 struct ustat {
89035diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
89036index 6b58367..57b150e 100644
89037--- a/include/linux/uaccess.h
89038+++ b/include/linux/uaccess.h
89039@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
89040 long ret; \
89041 mm_segment_t old_fs = get_fs(); \
89042 \
89043- set_fs(KERNEL_DS); \
89044 pagefault_disable(); \
89045- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
89046- pagefault_enable(); \
89047+ set_fs(KERNEL_DS); \
89048+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
89049 set_fs(old_fs); \
89050+ pagefault_enable(); \
89051 ret; \
89052 })
89053
89054@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
89055 * Safely read from address @src to the buffer at @dst. If a kernel fault
89056 * happens, handle that and return -EFAULT.
89057 */
89058-extern long probe_kernel_read(void *dst, void *src, size_t size);
89059+extern long probe_kernel_read(void *dst, const void *src, size_t size);
89060
89061 /*
89062 * probe_kernel_write(): safely attempt to write to a location
89063@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
89064 * Safely write to address @dst from the buffer at @src. If a kernel fault
89065 * happens, handle that and return -EFAULT.
89066 */
89067-extern long probe_kernel_write(void *dst, void *src, size_t size);
89068+extern long probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
89069
89070 #endif /* __LINUX_UACCESS_H__ */
89071diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
89072index 99c1b4d..bb94261 100644
89073--- a/include/linux/unaligned/access_ok.h
89074+++ b/include/linux/unaligned/access_ok.h
89075@@ -6,32 +6,32 @@
89076
89077 static inline u16 get_unaligned_le16(const void *p)
89078 {
89079- return le16_to_cpup((__le16 *)p);
89080+ return le16_to_cpup((const __le16 *)p);
89081 }
89082
89083 static inline u32 get_unaligned_le32(const void *p)
89084 {
89085- return le32_to_cpup((__le32 *)p);
89086+ return le32_to_cpup((const __le32 *)p);
89087 }
89088
89089 static inline u64 get_unaligned_le64(const void *p)
89090 {
89091- return le64_to_cpup((__le64 *)p);
89092+ return le64_to_cpup((const __le64 *)p);
89093 }
89094
89095 static inline u16 get_unaligned_be16(const void *p)
89096 {
89097- return be16_to_cpup((__be16 *)p);
89098+ return be16_to_cpup((const __be16 *)p);
89099 }
89100
89101 static inline u32 get_unaligned_be32(const void *p)
89102 {
89103- return be32_to_cpup((__be32 *)p);
89104+ return be32_to_cpup((const __be32 *)p);
89105 }
89106
89107 static inline u64 get_unaligned_be64(const void *p)
89108 {
89109- return be64_to_cpup((__be64 *)p);
89110+ return be64_to_cpup((const __be64 *)p);
89111 }
89112
89113 static inline void put_unaligned_le16(u16 val, void *p)
89114diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
89115index 79b9837..b5a56f9 100644
89116--- a/include/linux/vermagic.h
89117+++ b/include/linux/vermagic.h
89118@@ -26,9 +26,35 @@
89119 #define MODULE_ARCH_VERMAGIC ""
89120 #endif
89121
89122+#ifdef CONFIG_PAX_REFCOUNT
89123+#define MODULE_PAX_REFCOUNT "REFCOUNT "
89124+#else
89125+#define MODULE_PAX_REFCOUNT ""
89126+#endif
89127+
89128+#ifdef CONSTIFY_PLUGIN
89129+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
89130+#else
89131+#define MODULE_CONSTIFY_PLUGIN ""
89132+#endif
89133+
89134+#ifdef STACKLEAK_PLUGIN
89135+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
89136+#else
89137+#define MODULE_STACKLEAK_PLUGIN ""
89138+#endif
89139+
89140+#ifdef CONFIG_GRKERNSEC
89141+#define MODULE_GRSEC "GRSEC "
89142+#else
89143+#define MODULE_GRSEC ""
89144+#endif
89145+
89146 #define VERMAGIC_STRING \
89147 UTS_RELEASE " " \
89148 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
89149 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
89150- MODULE_ARCH_VERMAGIC
89151+ MODULE_ARCH_VERMAGIC \
89152+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
89153+ MODULE_GRSEC
89154
89155diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
89156index 819a634..b99e71b 100644
89157--- a/include/linux/vmalloc.h
89158+++ b/include/linux/vmalloc.h
89159@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
89160 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
89161 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
89162 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
89163+
89164+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89165+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
89166+#endif
89167+
89168 /* bits [20..32] reserved for arch specific ioremap internals */
89169
89170 /*
89171@@ -51,13 +56,13 @@ static inline void vmalloc_init(void)
89172 }
89173 #endif
89174
89175-extern void *vmalloc(unsigned long size);
89176-extern void *vmalloc_user(unsigned long size);
89177-extern void *vmalloc_node(unsigned long size, int node);
89178-extern void *vmalloc_exec(unsigned long size);
89179-extern void *vmalloc_32(unsigned long size);
89180-extern void *vmalloc_32_user(unsigned long size);
89181-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
89182+extern void *vmalloc(unsigned long size) __size_overflow(1);
89183+extern void *vmalloc_user(unsigned long size) __size_overflow(1);
89184+extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
89185+extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
89186+extern void *vmalloc_32(unsigned long size) __size_overflow(1);
89187+extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
89188+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
89189 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
89190 pgprot_t prot);
89191 extern void vfree(const void *addr);
89192@@ -106,8 +111,8 @@ extern struct vm_struct *alloc_vm_area(size_t size);
89193 extern void free_vm_area(struct vm_struct *area);
89194
89195 /* for /dev/kmem */
89196-extern long vread(char *buf, char *addr, unsigned long count);
89197-extern long vwrite(char *buf, char *addr, unsigned long count);
89198+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
89199+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
89200
89201 /*
89202 * Internals. Dont't use..
89203diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
89204index 13070d6..aa4159a 100644
89205--- a/include/linux/vmstat.h
89206+++ b/include/linux/vmstat.h
89207@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
89208 /*
89209 * Zone based page accounting with per cpu differentials.
89210 */
89211-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89212+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89213
89214 static inline void zone_page_state_add(long x, struct zone *zone,
89215 enum zone_stat_item item)
89216 {
89217- atomic_long_add(x, &zone->vm_stat[item]);
89218- atomic_long_add(x, &vm_stat[item]);
89219+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
89220+ atomic_long_add_unchecked(x, &vm_stat[item]);
89221 }
89222
89223 static inline unsigned long global_page_state(enum zone_stat_item item)
89224 {
89225- long x = atomic_long_read(&vm_stat[item]);
89226+ long x = atomic_long_read_unchecked(&vm_stat[item]);
89227 #ifdef CONFIG_SMP
89228 if (x < 0)
89229 x = 0;
89230@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
89231 static inline unsigned long zone_page_state(struct zone *zone,
89232 enum zone_stat_item item)
89233 {
89234- long x = atomic_long_read(&zone->vm_stat[item]);
89235+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89236 #ifdef CONFIG_SMP
89237 if (x < 0)
89238 x = 0;
89239@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
89240 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
89241 enum zone_stat_item item)
89242 {
89243- long x = atomic_long_read(&zone->vm_stat[item]);
89244+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89245
89246 #ifdef CONFIG_SMP
89247 int cpu;
89248@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
89249
89250 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
89251 {
89252- atomic_long_inc(&zone->vm_stat[item]);
89253- atomic_long_inc(&vm_stat[item]);
89254+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
89255+ atomic_long_inc_unchecked(&vm_stat[item]);
89256 }
89257
89258 static inline void __inc_zone_page_state(struct page *page,
89259@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
89260
89261 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
89262 {
89263- atomic_long_dec(&zone->vm_stat[item]);
89264- atomic_long_dec(&vm_stat[item]);
89265+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
89266+ atomic_long_dec_unchecked(&vm_stat[item]);
89267 }
89268
89269 static inline void __dec_zone_page_state(struct page *page,
89270diff --git a/include/linux/xattr.h b/include/linux/xattr.h
89271index 5c84af8..1a3b6e2 100644
89272--- a/include/linux/xattr.h
89273+++ b/include/linux/xattr.h
89274@@ -33,6 +33,11 @@
89275 #define XATTR_USER_PREFIX "user."
89276 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
89277
89278+/* User namespace */
89279+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89280+#define XATTR_PAX_FLAGS_SUFFIX "flags"
89281+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89282+
89283 struct inode;
89284 struct dentry;
89285
89286diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
89287index eed5fcc..5080d24 100644
89288--- a/include/media/saa7146_vv.h
89289+++ b/include/media/saa7146_vv.h
89290@@ -167,7 +167,7 @@ struct saa7146_ext_vv
89291 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
89292
89293 /* the extension can override this */
89294- struct v4l2_ioctl_ops ops;
89295+ v4l2_ioctl_ops_no_const ops;
89296 /* pointer to the saa7146 core ops */
89297 const struct v4l2_ioctl_ops *core_ops;
89298
89299diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
89300index 73c9867..2da8837 100644
89301--- a/include/media/v4l2-dev.h
89302+++ b/include/media/v4l2-dev.h
89303@@ -34,7 +34,7 @@ struct v4l2_device;
89304 #define V4L2_FL_UNREGISTERED (0)
89305
89306 struct v4l2_file_operations {
89307- struct module *owner;
89308+ struct module * const owner;
89309 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
89310 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
89311 unsigned int (*poll) (struct file *, struct poll_table_struct *);
89312@@ -46,6 +46,7 @@ struct v4l2_file_operations {
89313 int (*open) (struct file *);
89314 int (*release) (struct file *);
89315 };
89316+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
89317
89318 /*
89319 * Newer version of video_device, handled by videodev2.c
89320diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
89321index 5d5d550..f559ef1 100644
89322--- a/include/media/v4l2-device.h
89323+++ b/include/media/v4l2-device.h
89324@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
89325 this function returns 0. If the name ends with a digit (e.g. cx18),
89326 then the name will be set to cx18-0 since cx180 looks really odd. */
89327 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
89328- atomic_t *instance);
89329+ atomic_unchecked_t *instance);
89330
89331 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
89332 Since the parent disappears this ensures that v4l2_dev doesn't have an
89333diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
89334index 7a4529d..7244290 100644
89335--- a/include/media/v4l2-ioctl.h
89336+++ b/include/media/v4l2-ioctl.h
89337@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
89338 long (*vidioc_default) (struct file *file, void *fh,
89339 int cmd, void *arg);
89340 };
89341+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
89342
89343
89344 /* v4l debugging and diagnostics */
89345diff --git a/include/net/flow.h b/include/net/flow.h
89346index 809970b..c3df4f3 100644
89347--- a/include/net/flow.h
89348+++ b/include/net/flow.h
89349@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
89350 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
89351 u8 dir, flow_resolve_t resolver);
89352 extern void flow_cache_flush(void);
89353-extern atomic_t flow_cache_genid;
89354+extern atomic_unchecked_t flow_cache_genid;
89355
89356 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
89357 {
89358diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
89359index 15e1f8fe..668837c 100644
89360--- a/include/net/inetpeer.h
89361+++ b/include/net/inetpeer.h
89362@@ -24,7 +24,7 @@ struct inet_peer
89363 __u32 dtime; /* the time of last use of not
89364 * referenced entries */
89365 atomic_t refcnt;
89366- atomic_t rid; /* Frag reception counter */
89367+ atomic_unchecked_t rid; /* Frag reception counter */
89368 __u32 tcp_ts;
89369 unsigned long tcp_ts_stamp;
89370 };
89371diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
89372index 98978e7..2243a3d 100644
89373--- a/include/net/ip_vs.h
89374+++ b/include/net/ip_vs.h
89375@@ -365,7 +365,7 @@ struct ip_vs_conn {
89376 struct ip_vs_conn *control; /* Master control connection */
89377 atomic_t n_control; /* Number of controlled ones */
89378 struct ip_vs_dest *dest; /* real server */
89379- atomic_t in_pkts; /* incoming packet counter */
89380+ atomic_unchecked_t in_pkts; /* incoming packet counter */
89381
89382 /* packet transmitter for different forwarding methods. If it
89383 mangles the packet, it must return NF_DROP or better NF_STOLEN,
89384@@ -466,7 +466,7 @@ struct ip_vs_dest {
89385 union nf_inet_addr addr; /* IP address of the server */
89386 __be16 port; /* port number of the server */
89387 volatile unsigned flags; /* dest status flags */
89388- atomic_t conn_flags; /* flags to copy to conn */
89389+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
89390 atomic_t weight; /* server weight */
89391
89392 atomic_t refcnt; /* reference counter */
89393diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
89394index 69b610a..fe3962c 100644
89395--- a/include/net/irda/ircomm_core.h
89396+++ b/include/net/irda/ircomm_core.h
89397@@ -51,7 +51,7 @@ typedef struct {
89398 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
89399 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
89400 struct ircomm_info *);
89401-} call_t;
89402+} __no_const call_t;
89403
89404 struct ircomm_cb {
89405 irda_queue_t queue;
89406diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
89407index eea2e61..08c692d 100644
89408--- a/include/net/irda/ircomm_tty.h
89409+++ b/include/net/irda/ircomm_tty.h
89410@@ -35,6 +35,7 @@
89411 #include <linux/termios.h>
89412 #include <linux/timer.h>
89413 #include <linux/tty.h> /* struct tty_struct */
89414+#include <asm/local.h>
89415
89416 #include <net/irda/irias_object.h>
89417 #include <net/irda/ircomm_core.h>
89418@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
89419 unsigned short close_delay;
89420 unsigned short closing_wait; /* time to wait before closing */
89421
89422- int open_count;
89423- int blocked_open; /* # of blocked opens */
89424+ local_t open_count;
89425+ local_t blocked_open; /* # of blocked opens */
89426
89427 /* Protect concurent access to :
89428 * o self->open_count
89429diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
89430index f82a1e8..82d81e8 100644
89431--- a/include/net/iucv/af_iucv.h
89432+++ b/include/net/iucv/af_iucv.h
89433@@ -87,7 +87,7 @@ struct iucv_sock {
89434 struct iucv_sock_list {
89435 struct hlist_head head;
89436 rwlock_t lock;
89437- atomic_t autobind_name;
89438+ atomic_unchecked_t autobind_name;
89439 };
89440
89441 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
89442diff --git a/include/net/lapb.h b/include/net/lapb.h
89443index 96cb5dd..25e8d4f 100644
89444--- a/include/net/lapb.h
89445+++ b/include/net/lapb.h
89446@@ -95,7 +95,7 @@ struct lapb_cb {
89447 struct sk_buff_head write_queue;
89448 struct sk_buff_head ack_queue;
89449 unsigned char window;
89450- struct lapb_register_struct callbacks;
89451+ struct lapb_register_struct *callbacks;
89452
89453 /* FRMR control information */
89454 struct lapb_frame frmr_data;
89455diff --git a/include/net/neighbour.h b/include/net/neighbour.h
89456index 3817fda..cdb2343 100644
89457--- a/include/net/neighbour.h
89458+++ b/include/net/neighbour.h
89459@@ -131,7 +131,7 @@ struct neigh_ops
89460 int (*connected_output)(struct sk_buff*);
89461 int (*hh_output)(struct sk_buff*);
89462 int (*queue_xmit)(struct sk_buff*);
89463-};
89464+} __do_const;
89465
89466 struct pneigh_entry
89467 {
89468diff --git a/include/net/netlink.h b/include/net/netlink.h
89469index c344646..4778c71 100644
89470--- a/include/net/netlink.h
89471+++ b/include/net/netlink.h
89472@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
89473 {
89474 return (remaining >= (int) sizeof(struct nlmsghdr) &&
89475 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
89476- nlh->nlmsg_len <= remaining);
89477+ nlh->nlmsg_len <= (unsigned int)remaining);
89478 }
89479
89480 /**
89481@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
89482 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
89483 {
89484 if (mark)
89485- skb_trim(skb, (unsigned char *) mark - skb->data);
89486+ skb_trim(skb, (const unsigned char *) mark - skb->data);
89487 }
89488
89489 /**
89490diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
89491index 9a4b8b7..e49e077 100644
89492--- a/include/net/netns/ipv4.h
89493+++ b/include/net/netns/ipv4.h
89494@@ -54,7 +54,7 @@ struct netns_ipv4 {
89495 int current_rt_cache_rebuild_count;
89496
89497 struct timer_list rt_secret_timer;
89498- atomic_t rt_genid;
89499+ atomic_unchecked_t rt_genid;
89500
89501 #ifdef CONFIG_IP_MROUTE
89502 struct sock *mroute_sk;
89503diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
89504index 8a6d529..171f401 100644
89505--- a/include/net/sctp/sctp.h
89506+++ b/include/net/sctp/sctp.h
89507@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
89508
89509 #else /* SCTP_DEBUG */
89510
89511-#define SCTP_DEBUG_PRINTK(whatever...)
89512-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
89513+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
89514+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
89515 #define SCTP_ENABLE_DEBUG
89516 #define SCTP_DISABLE_DEBUG
89517 #define SCTP_ASSERT(expr, str, func)
89518diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
89519index d97f689..f3b90ab 100644
89520--- a/include/net/secure_seq.h
89521+++ b/include/net/secure_seq.h
89522@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
89523 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
89524 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
89525 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
89526- __be16 dport);
89527+ __be16 dport);
89528 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
89529 __be16 sport, __be16 dport);
89530 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89531- __be16 sport, __be16 dport);
89532+ __be16 sport, __be16 dport);
89533 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
89534- __be16 sport, __be16 dport);
89535+ __be16 sport, __be16 dport);
89536 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89537- __be16 sport, __be16 dport);
89538+ __be16 sport, __be16 dport);
89539
89540 #endif /* _NET_SECURE_SEQ */
89541diff --git a/include/net/sock.h b/include/net/sock.h
89542index 78adf52..99afd29 100644
89543--- a/include/net/sock.h
89544+++ b/include/net/sock.h
89545@@ -272,7 +272,7 @@ struct sock {
89546 rwlock_t sk_callback_lock;
89547 int sk_err,
89548 sk_err_soft;
89549- atomic_t sk_drops;
89550+ atomic_unchecked_t sk_drops;
89551 unsigned short sk_ack_backlog;
89552 unsigned short sk_max_ack_backlog;
89553 __u32 sk_priority;
89554@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
89555 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
89556 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
89557 #else
89558-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
89559+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
89560 int inc)
89561 {
89562 }
89563diff --git a/include/net/tcp.h b/include/net/tcp.h
89564index 6cfe18b..dd21acb 100644
89565--- a/include/net/tcp.h
89566+++ b/include/net/tcp.h
89567@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
89568 struct tcp_seq_afinfo {
89569 char *name;
89570 sa_family_t family;
89571- struct file_operations seq_fops;
89572- struct seq_operations seq_ops;
89573+ file_operations_no_const seq_fops;
89574+ seq_operations_no_const seq_ops;
89575 };
89576
89577 struct tcp_iter_state {
89578diff --git a/include/net/udp.h b/include/net/udp.h
89579index f98abd2..b4b042f 100644
89580--- a/include/net/udp.h
89581+++ b/include/net/udp.h
89582@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
89583 char *name;
89584 sa_family_t family;
89585 struct udp_table *udp_table;
89586- struct file_operations seq_fops;
89587- struct seq_operations seq_ops;
89588+ file_operations_no_const seq_fops;
89589+ seq_operations_no_const seq_ops;
89590 };
89591
89592 struct udp_iter_state {
89593diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
89594index cbb822e..e9c1cbe 100644
89595--- a/include/rdma/iw_cm.h
89596+++ b/include/rdma/iw_cm.h
89597@@ -129,7 +129,7 @@ struct iw_cm_verbs {
89598 int backlog);
89599
89600 int (*destroy_listen)(struct iw_cm_id *cm_id);
89601-};
89602+} __no_const;
89603
89604 /**
89605 * iw_create_cm_id - Create an IW CM identifier.
89606diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
89607index 09a124b..caa8ca8 100644
89608--- a/include/scsi/libfc.h
89609+++ b/include/scsi/libfc.h
89610@@ -675,6 +675,7 @@ struct libfc_function_template {
89611 */
89612 void (*disc_stop_final) (struct fc_lport *);
89613 };
89614+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
89615
89616 /* information used by the discovery layer */
89617 struct fc_disc {
89618@@ -707,7 +708,7 @@ struct fc_lport {
89619 struct fc_disc disc;
89620
89621 /* Operational Information */
89622- struct libfc_function_template tt;
89623+ libfc_function_template_no_const tt;
89624 u8 link_up;
89625 u8 qfull;
89626 enum fc_lport_state state;
89627diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
89628index de8e180..f15e0d7 100644
89629--- a/include/scsi/scsi_device.h
89630+++ b/include/scsi/scsi_device.h
89631@@ -156,9 +156,9 @@ struct scsi_device {
89632 unsigned int max_device_blocked; /* what device_blocked counts down from */
89633 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
89634
89635- atomic_t iorequest_cnt;
89636- atomic_t iodone_cnt;
89637- atomic_t ioerr_cnt;
89638+ atomic_unchecked_t iorequest_cnt;
89639+ atomic_unchecked_t iodone_cnt;
89640+ atomic_unchecked_t ioerr_cnt;
89641
89642 struct device sdev_gendev,
89643 sdev_dev;
89644diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
89645index 0b4baba..0106e9e 100644
89646--- a/include/scsi/scsi_host.h
89647+++ b/include/scsi/scsi_host.h
89648@@ -43,6 +43,12 @@ struct blk_queue_tags;
89649 #define DISABLE_CLUSTERING 0
89650 #define ENABLE_CLUSTERING 1
89651
89652+enum {
89653+ SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
89654+ SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
89655+ SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshhold event */
89656+};
89657+
89658 struct scsi_host_template {
89659 struct module *module;
89660 const char *name;
89661diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
89662index fc50bd6..81ba9cb 100644
89663--- a/include/scsi/scsi_transport_fc.h
89664+++ b/include/scsi/scsi_transport_fc.h
89665@@ -708,7 +708,7 @@ struct fc_function_template {
89666 unsigned long show_host_system_hostname:1;
89667
89668 unsigned long disable_target_scan:1;
89669-};
89670+} __do_const;
89671
89672
89673 /**
89674diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
89675index 3dae3f7..8440d6f 100644
89676--- a/include/sound/ac97_codec.h
89677+++ b/include/sound/ac97_codec.h
89678@@ -419,15 +419,15 @@
89679 struct snd_ac97;
89680
89681 struct snd_ac97_build_ops {
89682- int (*build_3d) (struct snd_ac97 *ac97);
89683- int (*build_specific) (struct snd_ac97 *ac97);
89684- int (*build_spdif) (struct snd_ac97 *ac97);
89685- int (*build_post_spdif) (struct snd_ac97 *ac97);
89686+ int (* const build_3d) (struct snd_ac97 *ac97);
89687+ int (* const build_specific) (struct snd_ac97 *ac97);
89688+ int (* const build_spdif) (struct snd_ac97 *ac97);
89689+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
89690 #ifdef CONFIG_PM
89691- void (*suspend) (struct snd_ac97 *ac97);
89692- void (*resume) (struct snd_ac97 *ac97);
89693+ void (* const suspend) (struct snd_ac97 *ac97);
89694+ void (* const resume) (struct snd_ac97 *ac97);
89695 #endif
89696- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89697+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89698 };
89699
89700 struct snd_ac97_bus_ops {
89701@@ -477,7 +477,7 @@ struct snd_ac97_template {
89702
89703 struct snd_ac97 {
89704 /* -- lowlevel (hardware) driver specific -- */
89705- struct snd_ac97_build_ops * build_ops;
89706+ const struct snd_ac97_build_ops * build_ops;
89707 void *private_data;
89708 void (*private_free) (struct snd_ac97 *ac97);
89709 /* --- */
89710diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
89711index 891cf1a..a94ba2b 100644
89712--- a/include/sound/ak4xxx-adda.h
89713+++ b/include/sound/ak4xxx-adda.h
89714@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
89715 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
89716 unsigned char val);
89717 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
89718-};
89719+} __no_const;
89720
89721 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
89722
89723diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
89724index 8c05e47..2b5df97 100644
89725--- a/include/sound/hwdep.h
89726+++ b/include/sound/hwdep.h
89727@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
89728 struct snd_hwdep_dsp_status *status);
89729 int (*dsp_load)(struct snd_hwdep *hw,
89730 struct snd_hwdep_dsp_image *image);
89731-};
89732+} __no_const;
89733
89734 struct snd_hwdep {
89735 struct snd_card *card;
89736diff --git a/include/sound/info.h b/include/sound/info.h
89737index 112e894..6fda5b5 100644
89738--- a/include/sound/info.h
89739+++ b/include/sound/info.h
89740@@ -44,7 +44,7 @@ struct snd_info_entry_text {
89741 struct snd_info_buffer *buffer);
89742 void (*write)(struct snd_info_entry *entry,
89743 struct snd_info_buffer *buffer);
89744-};
89745+} __no_const;
89746
89747 struct snd_info_entry_ops {
89748 int (*open)(struct snd_info_entry *entry,
89749diff --git a/include/sound/pcm.h b/include/sound/pcm.h
89750index de6d981..590a550 100644
89751--- a/include/sound/pcm.h
89752+++ b/include/sound/pcm.h
89753@@ -80,6 +80,7 @@ struct snd_pcm_ops {
89754 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
89755 int (*ack)(struct snd_pcm_substream *substream);
89756 };
89757+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
89758
89759 /*
89760 *
89761diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
89762index 736eac7..fe8a80f 100644
89763--- a/include/sound/sb16_csp.h
89764+++ b/include/sound/sb16_csp.h
89765@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
89766 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
89767 int (*csp_stop) (struct snd_sb_csp * p);
89768 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
89769-};
89770+} __no_const;
89771
89772 /*
89773 * CSP private data
89774diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
89775index 444cd6b..3327cc5 100644
89776--- a/include/sound/ymfpci.h
89777+++ b/include/sound/ymfpci.h
89778@@ -358,7 +358,7 @@ struct snd_ymfpci {
89779 spinlock_t reg_lock;
89780 spinlock_t voice_lock;
89781 wait_queue_head_t interrupt_sleep;
89782- atomic_t interrupt_sleep_count;
89783+ atomic_unchecked_t interrupt_sleep_count;
89784 struct snd_info_entry *proc_entry;
89785 const struct firmware *dsp_microcode;
89786 const struct firmware *controller_microcode;
89787diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89788index b89f9db..f097b38 100644
89789--- a/include/trace/events/irq.h
89790+++ b/include/trace/events/irq.h
89791@@ -34,7 +34,7 @@
89792 */
89793 TRACE_EVENT(irq_handler_entry,
89794
89795- TP_PROTO(int irq, struct irqaction *action),
89796+ TP_PROTO(int irq, const struct irqaction *action),
89797
89798 TP_ARGS(irq, action),
89799
89800@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
89801 */
89802 TRACE_EVENT(irq_handler_exit,
89803
89804- TP_PROTO(int irq, struct irqaction *action, int ret),
89805+ TP_PROTO(int irq, const struct irqaction *action, int ret),
89806
89807 TP_ARGS(irq, action, ret),
89808
89809@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
89810 */
89811 TRACE_EVENT(softirq_entry,
89812
89813- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89814+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89815
89816 TP_ARGS(h, vec),
89817
89818@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
89819 */
89820 TRACE_EVENT(softirq_exit,
89821
89822- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89823+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89824
89825 TP_ARGS(h, vec),
89826
89827diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89828index 0993a22..32ba2fe 100644
89829--- a/include/video/uvesafb.h
89830+++ b/include/video/uvesafb.h
89831@@ -177,6 +177,7 @@ struct uvesafb_par {
89832 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89833 u8 pmi_setpal; /* PMI for palette changes */
89834 u16 *pmi_base; /* protected mode interface location */
89835+ u8 *pmi_code; /* protected mode code location */
89836 void *pmi_start;
89837 void *pmi_pal;
89838 u8 *vbe_state_orig; /*
89839diff --git a/init/Kconfig b/init/Kconfig
89840index d72691b..3996e54 100644
89841--- a/init/Kconfig
89842+++ b/init/Kconfig
89843@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
89844
89845 config COMPAT_BRK
89846 bool "Disable heap randomization"
89847- default y
89848+ default n
89849 help
89850 Randomizing heap placement makes heap exploits harder, but it
89851 also breaks ancient binaries (including anything libc5 based).
89852diff --git a/init/do_mounts.c b/init/do_mounts.c
89853index bb008d0..4fa3933 100644
89854--- a/init/do_mounts.c
89855+++ b/init/do_mounts.c
89856@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
89857
89858 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89859 {
89860- int err = sys_mount(name, "/root", fs, flags, data);
89861+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
89862 if (err)
89863 return err;
89864
89865- sys_chdir("/root");
89866+ sys_chdir((__force const char __user *)"/root");
89867 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
89868 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
89869 current->fs->pwd.mnt->mnt_sb->s_type->name,
89870@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
89871 va_start(args, fmt);
89872 vsprintf(buf, fmt, args);
89873 va_end(args);
89874- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89875+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89876 if (fd >= 0) {
89877 sys_ioctl(fd, FDEJECT, 0);
89878 sys_close(fd);
89879 }
89880 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89881- fd = sys_open("/dev/console", O_RDWR, 0);
89882+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
89883 if (fd >= 0) {
89884 sys_ioctl(fd, TCGETS, (long)&termios);
89885 termios.c_lflag &= ~ICANON;
89886 sys_ioctl(fd, TCSETSF, (long)&termios);
89887- sys_read(fd, &c, 1);
89888+ sys_read(fd, (char __user *)&c, 1);
89889 termios.c_lflag |= ICANON;
89890 sys_ioctl(fd, TCSETSF, (long)&termios);
89891 sys_close(fd);
89892@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
89893 mount_root();
89894 out:
89895 devtmpfs_mount("dev");
89896- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89897- sys_chroot(".");
89898+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
89899+ sys_chroot((__force char __user *)".");
89900 }
89901diff --git a/init/do_mounts.h b/init/do_mounts.h
89902index f5b978a..69dbfe8 100644
89903--- a/init/do_mounts.h
89904+++ b/init/do_mounts.h
89905@@ -15,15 +15,15 @@ extern int root_mountflags;
89906
89907 static inline int create_dev(char *name, dev_t dev)
89908 {
89909- sys_unlink(name);
89910- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89911+ sys_unlink((char __force_user *)name);
89912+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89913 }
89914
89915 #if BITS_PER_LONG == 32
89916 static inline u32 bstat(char *name)
89917 {
89918 struct stat64 stat;
89919- if (sys_stat64(name, &stat) != 0)
89920+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89921 return 0;
89922 if (!S_ISBLK(stat.st_mode))
89923 return 0;
89924@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89925 static inline u32 bstat(char *name)
89926 {
89927 struct stat stat;
89928- if (sys_newstat(name, &stat) != 0)
89929+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89930 return 0;
89931 if (!S_ISBLK(stat.st_mode))
89932 return 0;
89933diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89934index 614241b..4da046b 100644
89935--- a/init/do_mounts_initrd.c
89936+++ b/init/do_mounts_initrd.c
89937@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
89938 sys_close(old_fd);sys_close(root_fd);
89939 sys_close(0);sys_close(1);sys_close(2);
89940 sys_setsid();
89941- (void) sys_open("/dev/console",O_RDWR,0);
89942+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
89943 (void) sys_dup(0);
89944 (void) sys_dup(0);
89945 return kernel_execve(shell, argv, envp_init);
89946@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
89947 create_dev("/dev/root.old", Root_RAM0);
89948 /* mount initrd on rootfs' /root */
89949 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89950- sys_mkdir("/old", 0700);
89951- root_fd = sys_open("/", 0, 0);
89952- old_fd = sys_open("/old", 0, 0);
89953+ sys_mkdir((const char __force_user *)"/old", 0700);
89954+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
89955+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
89956 /* move initrd over / and chdir/chroot in initrd root */
89957- sys_chdir("/root");
89958- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89959- sys_chroot(".");
89960+ sys_chdir((const char __force_user *)"/root");
89961+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89962+ sys_chroot((const char __force_user *)".");
89963
89964 /*
89965 * In case that a resume from disk is carried out by linuxrc or one of
89966@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
89967
89968 /* move initrd to rootfs' /old */
89969 sys_fchdir(old_fd);
89970- sys_mount("/", ".", NULL, MS_MOVE, NULL);
89971+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
89972 /* switch root and cwd back to / of rootfs */
89973 sys_fchdir(root_fd);
89974- sys_chroot(".");
89975+ sys_chroot((const char __force_user *)".");
89976 sys_close(old_fd);
89977 sys_close(root_fd);
89978
89979 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89980- sys_chdir("/old");
89981+ sys_chdir((const char __force_user *)"/old");
89982 return;
89983 }
89984
89985@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
89986 mount_root();
89987
89988 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89989- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89990+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89991 if (!error)
89992 printk("okay\n");
89993 else {
89994- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89995+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89996 if (error == -ENOENT)
89997 printk("/initrd does not exist. Ignored.\n");
89998 else
89999 printk("failed\n");
90000 printk(KERN_NOTICE "Unmounting old root\n");
90001- sys_umount("/old", MNT_DETACH);
90002+ sys_umount((char __force_user *)"/old", MNT_DETACH);
90003 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
90004 if (fd < 0) {
90005 error = fd;
90006@@ -119,11 +119,11 @@ int __init initrd_load(void)
90007 * mounted in the normal path.
90008 */
90009 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
90010- sys_unlink("/initrd.image");
90011+ sys_unlink((const char __force_user *)"/initrd.image");
90012 handle_initrd();
90013 return 1;
90014 }
90015 }
90016- sys_unlink("/initrd.image");
90017+ sys_unlink((const char __force_user *)"/initrd.image");
90018 return 0;
90019 }
90020diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
90021index 69aebbf..c0bf6a7 100644
90022--- a/init/do_mounts_md.c
90023+++ b/init/do_mounts_md.c
90024@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
90025 partitioned ? "_d" : "", minor,
90026 md_setup_args[ent].device_names);
90027
90028- fd = sys_open(name, 0, 0);
90029+ fd = sys_open((char __force_user *)name, 0, 0);
90030 if (fd < 0) {
90031 printk(KERN_ERR "md: open failed - cannot start "
90032 "array %s\n", name);
90033@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
90034 * array without it
90035 */
90036 sys_close(fd);
90037- fd = sys_open(name, 0, 0);
90038+ fd = sys_open((char __force_user *)name, 0, 0);
90039 sys_ioctl(fd, BLKRRPART, 0);
90040 }
90041 sys_close(fd);
90042@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
90043
90044 wait_for_device_probe();
90045
90046- fd = sys_open("/dev/md0", 0, 0);
90047+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
90048 if (fd >= 0) {
90049 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
90050 sys_close(fd);
90051diff --git a/init/initramfs.c b/init/initramfs.c
90052index 1fd59b8..a01b079 100644
90053--- a/init/initramfs.c
90054+++ b/init/initramfs.c
90055@@ -74,7 +74,7 @@ static void __init free_hash(void)
90056 }
90057 }
90058
90059-static long __init do_utime(char __user *filename, time_t mtime)
90060+static long __init do_utime(__force char __user *filename, time_t mtime)
90061 {
90062 struct timespec t[2];
90063
90064@@ -109,7 +109,7 @@ static void __init dir_utime(void)
90065 struct dir_entry *de, *tmp;
90066 list_for_each_entry_safe(de, tmp, &dir_list, list) {
90067 list_del(&de->list);
90068- do_utime(de->name, de->mtime);
90069+ do_utime((char __force_user *)de->name, de->mtime);
90070 kfree(de->name);
90071 kfree(de);
90072 }
90073@@ -271,7 +271,7 @@ static int __init maybe_link(void)
90074 if (nlink >= 2) {
90075 char *old = find_link(major, minor, ino, mode, collected);
90076 if (old)
90077- return (sys_link(old, collected) < 0) ? -1 : 1;
90078+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
90079 }
90080 return 0;
90081 }
90082@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
90083 {
90084 struct stat st;
90085
90086- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
90087+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
90088 if (S_ISDIR(st.st_mode))
90089- sys_rmdir(path);
90090+ sys_rmdir((char __force_user *)path);
90091 else
90092- sys_unlink(path);
90093+ sys_unlink((char __force_user *)path);
90094 }
90095 }
90096
90097@@ -305,7 +305,7 @@ static int __init do_name(void)
90098 int openflags = O_WRONLY|O_CREAT;
90099 if (ml != 1)
90100 openflags |= O_TRUNC;
90101- wfd = sys_open(collected, openflags, mode);
90102+ wfd = sys_open((char __force_user *)collected, openflags, mode);
90103
90104 if (wfd >= 0) {
90105 sys_fchown(wfd, uid, gid);
90106@@ -317,17 +317,17 @@ static int __init do_name(void)
90107 }
90108 }
90109 } else if (S_ISDIR(mode)) {
90110- sys_mkdir(collected, mode);
90111- sys_chown(collected, uid, gid);
90112- sys_chmod(collected, mode);
90113+ sys_mkdir((char __force_user *)collected, mode);
90114+ sys_chown((char __force_user *)collected, uid, gid);
90115+ sys_chmod((char __force_user *)collected, mode);
90116 dir_add(collected, mtime);
90117 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
90118 S_ISFIFO(mode) || S_ISSOCK(mode)) {
90119 if (maybe_link() == 0) {
90120- sys_mknod(collected, mode, rdev);
90121- sys_chown(collected, uid, gid);
90122- sys_chmod(collected, mode);
90123- do_utime(collected, mtime);
90124+ sys_mknod((char __force_user *)collected, mode, rdev);
90125+ sys_chown((char __force_user *)collected, uid, gid);
90126+ sys_chmod((char __force_user *)collected, mode);
90127+ do_utime((char __force_user *)collected, mtime);
90128 }
90129 }
90130 return 0;
90131@@ -336,15 +336,15 @@ static int __init do_name(void)
90132 static int __init do_copy(void)
90133 {
90134 if (count >= body_len) {
90135- sys_write(wfd, victim, body_len);
90136+ sys_write(wfd, (char __force_user *)victim, body_len);
90137 sys_close(wfd);
90138- do_utime(vcollected, mtime);
90139+ do_utime((char __force_user *)vcollected, mtime);
90140 kfree(vcollected);
90141 eat(body_len);
90142 state = SkipIt;
90143 return 0;
90144 } else {
90145- sys_write(wfd, victim, count);
90146+ sys_write(wfd, (char __force_user *)victim, count);
90147 body_len -= count;
90148 eat(count);
90149 return 1;
90150@@ -355,9 +355,9 @@ static int __init do_symlink(void)
90151 {
90152 collected[N_ALIGN(name_len) + body_len] = '\0';
90153 clean_path(collected, 0);
90154- sys_symlink(collected + N_ALIGN(name_len), collected);
90155- sys_lchown(collected, uid, gid);
90156- do_utime(collected, mtime);
90157+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
90158+ sys_lchown((char __force_user *)collected, uid, gid);
90159+ do_utime((char __force_user *)collected, mtime);
90160 state = SkipIt;
90161 next_state = Reset;
90162 return 0;
90163diff --git a/init/main.c b/init/main.c
90164index 1eb4bd5..fea5bbe 100644
90165--- a/init/main.c
90166+++ b/init/main.c
90167@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
90168 #ifdef CONFIG_TC
90169 extern void tc_init(void);
90170 #endif
90171+extern void grsecurity_init(void);
90172
90173 enum system_states system_state __read_mostly;
90174 EXPORT_SYMBOL(system_state);
90175@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
90176
90177 __setup("reset_devices", set_reset_devices);
90178
90179+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
90180+extern char pax_enter_kernel_user[];
90181+extern char pax_exit_kernel_user[];
90182+extern pgdval_t clone_pgd_mask;
90183+#endif
90184+
90185+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
90186+static int __init setup_pax_nouderef(char *str)
90187+{
90188+#ifdef CONFIG_X86_32
90189+ unsigned int cpu;
90190+ struct desc_struct *gdt;
90191+
90192+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
90193+ gdt = get_cpu_gdt_table(cpu);
90194+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
90195+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
90196+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
90197+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
90198+ }
90199+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
90200+#else
90201+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
90202+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
90203+ clone_pgd_mask = ~(pgdval_t)0UL;
90204+#endif
90205+
90206+ return 0;
90207+}
90208+early_param("pax_nouderef", setup_pax_nouderef);
90209+#endif
90210+
90211+#ifdef CONFIG_PAX_SOFTMODE
90212+int pax_softmode;
90213+
90214+static int __init setup_pax_softmode(char *str)
90215+{
90216+ get_option(&str, &pax_softmode);
90217+ return 1;
90218+}
90219+__setup("pax_softmode=", setup_pax_softmode);
90220+#endif
90221+
90222 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
90223 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
90224 static const char *panic_later, *panic_param;
90225@@ -705,52 +749,53 @@ int initcall_debug;
90226 core_param(initcall_debug, initcall_debug, bool, 0644);
90227
90228 static char msgbuf[64];
90229-static struct boot_trace_call call;
90230-static struct boot_trace_ret ret;
90231+static struct boot_trace_call trace_call;
90232+static struct boot_trace_ret trace_ret;
90233
90234 int do_one_initcall(initcall_t fn)
90235 {
90236 int count = preempt_count();
90237 ktime_t calltime, delta, rettime;
90238+ const char *msg1 = "", *msg2 = "";
90239
90240 if (initcall_debug) {
90241- call.caller = task_pid_nr(current);
90242- printk("calling %pF @ %i\n", fn, call.caller);
90243+ trace_call.caller = task_pid_nr(current);
90244+ printk("calling %pF @ %i\n", fn, trace_call.caller);
90245 calltime = ktime_get();
90246- trace_boot_call(&call, fn);
90247+ trace_boot_call(&trace_call, fn);
90248 enable_boot_trace();
90249 }
90250
90251- ret.result = fn();
90252+ trace_ret.result = fn();
90253
90254 if (initcall_debug) {
90255 disable_boot_trace();
90256 rettime = ktime_get();
90257 delta = ktime_sub(rettime, calltime);
90258- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90259- trace_boot_ret(&ret, fn);
90260+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90261+ trace_boot_ret(&trace_ret, fn);
90262 printk("initcall %pF returned %d after %Ld usecs\n", fn,
90263- ret.result, ret.duration);
90264+ trace_ret.result, trace_ret.duration);
90265 }
90266
90267 msgbuf[0] = 0;
90268
90269- if (ret.result && ret.result != -ENODEV && initcall_debug)
90270- sprintf(msgbuf, "error code %d ", ret.result);
90271+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
90272+ sprintf(msgbuf, "error code %d ", trace_ret.result);
90273
90274 if (preempt_count() != count) {
90275- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
90276+ msg1 = " preemption imbalance";
90277 preempt_count() = count;
90278 }
90279 if (irqs_disabled()) {
90280- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
90281+ msg2 = " disabled interrupts";
90282 local_irq_enable();
90283 }
90284- if (msgbuf[0]) {
90285- printk("initcall %pF returned with %s\n", fn, msgbuf);
90286+ if (msgbuf[0] || *msg1 || *msg2) {
90287+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
90288 }
90289
90290- return ret.result;
90291+ return trace_ret.result;
90292 }
90293
90294
90295@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
90296 if (!ramdisk_execute_command)
90297 ramdisk_execute_command = "/init";
90298
90299- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
90300+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
90301 ramdisk_execute_command = NULL;
90302 prepare_namespace();
90303 }
90304
90305+ grsecurity_init();
90306+
90307 /*
90308 * Ok, we have completed the initial bootup, and
90309 * we're essentially up and running. Get rid of the
90310diff --git a/init/noinitramfs.c b/init/noinitramfs.c
90311index f4c1a3a..96c19bd 100644
90312--- a/init/noinitramfs.c
90313+++ b/init/noinitramfs.c
90314@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
90315 {
90316 int err;
90317
90318- err = sys_mkdir("/dev", 0755);
90319+ err = sys_mkdir((const char __user *)"/dev", 0755);
90320 if (err < 0)
90321 goto out;
90322
90323@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
90324 if (err < 0)
90325 goto out;
90326
90327- err = sys_mkdir("/root", 0700);
90328+ err = sys_mkdir((const char __user *)"/root", 0700);
90329 if (err < 0)
90330 goto out;
90331
90332diff --git a/ipc/mqueue.c b/ipc/mqueue.c
90333index d01bc14..8df81db 100644
90334--- a/ipc/mqueue.c
90335+++ b/ipc/mqueue.c
90336@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
90337 mq_bytes = (mq_msg_tblsz +
90338 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
90339
90340+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
90341 spin_lock(&mq_lock);
90342 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
90343 u->mq_bytes + mq_bytes >
90344diff --git a/ipc/msg.c b/ipc/msg.c
90345index 779f762..4af9e36 100644
90346--- a/ipc/msg.c
90347+++ b/ipc/msg.c
90348@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
90349 return security_msg_queue_associate(msq, msgflg);
90350 }
90351
90352+static struct ipc_ops msg_ops = {
90353+ .getnew = newque,
90354+ .associate = msg_security,
90355+ .more_checks = NULL
90356+};
90357+
90358 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
90359 {
90360 struct ipc_namespace *ns;
90361- struct ipc_ops msg_ops;
90362 struct ipc_params msg_params;
90363
90364 ns = current->nsproxy->ipc_ns;
90365
90366- msg_ops.getnew = newque;
90367- msg_ops.associate = msg_security;
90368- msg_ops.more_checks = NULL;
90369-
90370 msg_params.key = key;
90371 msg_params.flg = msgflg;
90372
90373diff --git a/ipc/sem.c b/ipc/sem.c
90374index b781007..f738b04 100644
90375--- a/ipc/sem.c
90376+++ b/ipc/sem.c
90377@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
90378 return 0;
90379 }
90380
90381+static struct ipc_ops sem_ops = {
90382+ .getnew = newary,
90383+ .associate = sem_security,
90384+ .more_checks = sem_more_checks
90385+};
90386+
90387 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90388 {
90389 struct ipc_namespace *ns;
90390- struct ipc_ops sem_ops;
90391 struct ipc_params sem_params;
90392
90393 ns = current->nsproxy->ipc_ns;
90394@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90395 if (nsems < 0 || nsems > ns->sc_semmsl)
90396 return -EINVAL;
90397
90398- sem_ops.getnew = newary;
90399- sem_ops.associate = sem_security;
90400- sem_ops.more_checks = sem_more_checks;
90401-
90402 sem_params.key = key;
90403 sem_params.flg = semflg;
90404 sem_params.u.nsems = nsems;
90405@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
90406 ushort* sem_io = fast_sem_io;
90407 int nsems;
90408
90409+ pax_track_stack();
90410+
90411 sma = sem_lock_check(ns, semid);
90412 if (IS_ERR(sma))
90413 return PTR_ERR(sma);
90414@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
90415 unsigned long jiffies_left = 0;
90416 struct ipc_namespace *ns;
90417
90418+ pax_track_stack();
90419+
90420 ns = current->nsproxy->ipc_ns;
90421
90422 if (nsops < 1 || semid < 0)
90423diff --git a/ipc/shm.c b/ipc/shm.c
90424index d30732c..e4992cd 100644
90425--- a/ipc/shm.c
90426+++ b/ipc/shm.c
90427@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
90428 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
90429 #endif
90430
90431+#ifdef CONFIG_GRKERNSEC
90432+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90433+ const time_t shm_createtime, const uid_t cuid,
90434+ const int shmid);
90435+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90436+ const time_t shm_createtime);
90437+#endif
90438+
90439 void shm_init_ns(struct ipc_namespace *ns)
90440 {
90441 ns->shm_ctlmax = SHMMAX;
90442@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
90443 shp->shm_lprid = 0;
90444 shp->shm_atim = shp->shm_dtim = 0;
90445 shp->shm_ctim = get_seconds();
90446+#ifdef CONFIG_GRKERNSEC
90447+ {
90448+ struct timespec timeval;
90449+ do_posix_clock_monotonic_gettime(&timeval);
90450+
90451+ shp->shm_createtime = timeval.tv_sec;
90452+ }
90453+#endif
90454 shp->shm_segsz = size;
90455 shp->shm_nattch = 0;
90456 shp->shm_file = file;
90457@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
90458 return 0;
90459 }
90460
90461+static struct ipc_ops shm_ops = {
90462+ .getnew = newseg,
90463+ .associate = shm_security,
90464+ .more_checks = shm_more_checks
90465+};
90466+
90467 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
90468 {
90469 struct ipc_namespace *ns;
90470- struct ipc_ops shm_ops;
90471 struct ipc_params shm_params;
90472
90473 ns = current->nsproxy->ipc_ns;
90474
90475- shm_ops.getnew = newseg;
90476- shm_ops.associate = shm_security;
90477- shm_ops.more_checks = shm_more_checks;
90478-
90479 shm_params.key = key;
90480 shm_params.flg = shmflg;
90481 shm_params.u.size = size;
90482@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90483 f_mode = FMODE_READ | FMODE_WRITE;
90484 }
90485 if (shmflg & SHM_EXEC) {
90486+
90487+#ifdef CONFIG_PAX_MPROTECT
90488+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
90489+ goto out;
90490+#endif
90491+
90492 prot |= PROT_EXEC;
90493 acc_mode |= S_IXUGO;
90494 }
90495@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90496 if (err)
90497 goto out_unlock;
90498
90499+#ifdef CONFIG_GRKERNSEC
90500+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
90501+ shp->shm_perm.cuid, shmid) ||
90502+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
90503+ err = -EACCES;
90504+ goto out_unlock;
90505+ }
90506+#endif
90507+
90508 path.dentry = dget(shp->shm_file->f_path.dentry);
90509 path.mnt = shp->shm_file->f_path.mnt;
90510 shp->shm_nattch++;
90511+#ifdef CONFIG_GRKERNSEC
90512+ shp->shm_lapid = current->pid;
90513+#endif
90514 size = i_size_read(path.dentry->d_inode);
90515 shm_unlock(shp);
90516
90517diff --git a/kernel/acct.c b/kernel/acct.c
90518index a6605ca..ca91111 100644
90519--- a/kernel/acct.c
90520+++ b/kernel/acct.c
90521@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
90522 */
90523 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
90524 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
90525- file->f_op->write(file, (char *)&ac,
90526+ file->f_op->write(file, (char __force_user *)&ac,
90527 sizeof(acct_t), &file->f_pos);
90528 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
90529 set_fs(fs);
90530diff --git a/kernel/audit.c b/kernel/audit.c
90531index 5feed23..48415fd 100644
90532--- a/kernel/audit.c
90533+++ b/kernel/audit.c
90534@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
90535 3) suppressed due to audit_rate_limit
90536 4) suppressed due to audit_backlog_limit
90537 */
90538-static atomic_t audit_lost = ATOMIC_INIT(0);
90539+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
90540
90541 /* The netlink socket. */
90542 static struct sock *audit_sock;
90543@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
90544 unsigned long now;
90545 int print;
90546
90547- atomic_inc(&audit_lost);
90548+ atomic_inc_unchecked(&audit_lost);
90549
90550 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
90551
90552@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
90553 printk(KERN_WARNING
90554 "audit: audit_lost=%d audit_rate_limit=%d "
90555 "audit_backlog_limit=%d\n",
90556- atomic_read(&audit_lost),
90557+ atomic_read_unchecked(&audit_lost),
90558 audit_rate_limit,
90559 audit_backlog_limit);
90560 audit_panic(message);
90561@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90562 status_set.pid = audit_pid;
90563 status_set.rate_limit = audit_rate_limit;
90564 status_set.backlog_limit = audit_backlog_limit;
90565- status_set.lost = atomic_read(&audit_lost);
90566+ status_set.lost = atomic_read_unchecked(&audit_lost);
90567 status_set.backlog = skb_queue_len(&audit_skb_queue);
90568 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
90569 &status_set, sizeof(status_set));
90570@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90571 spin_unlock_irq(&tsk->sighand->siglock);
90572 }
90573 read_unlock(&tasklist_lock);
90574- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
90575- &s, sizeof(s));
90576+
90577+ if (!err)
90578+ audit_send_reply(NETLINK_CB(skb).pid, seq,
90579+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
90580 break;
90581 }
90582 case AUDIT_TTY_SET: {
90583@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
90584 avail = audit_expand(ab,
90585 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
90586 if (!avail)
90587- goto out;
90588+ goto out_va_end;
90589 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
90590 }
90591- va_end(args2);
90592 if (len > 0)
90593 skb_put(skb, len);
90594+out_va_end:
90595+ va_end(args2);
90596 out:
90597 return;
90598 }
90599diff --git a/kernel/auditsc.c b/kernel/auditsc.c
90600index 267e484..ac41bc3 100644
90601--- a/kernel/auditsc.c
90602+++ b/kernel/auditsc.c
90603@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
90604 struct audit_buffer **ab,
90605 struct audit_aux_data_execve *axi)
90606 {
90607- int i;
90608- size_t len, len_sent = 0;
90609+ int i, len;
90610+ size_t len_sent = 0;
90611 const char __user *p;
90612 char *buf;
90613
90614@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
90615 }
90616
90617 /* global counter which is incremented every time something logs in */
90618-static atomic_t session_id = ATOMIC_INIT(0);
90619+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
90620
90621 /**
90622 * audit_set_loginuid - set a task's audit_context loginuid
90623@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
90624 */
90625 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
90626 {
90627- unsigned int sessionid = atomic_inc_return(&session_id);
90628+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
90629 struct audit_context *context = task->audit_context;
90630
90631 if (context && context->in_syscall) {
90632diff --git a/kernel/capability.c b/kernel/capability.c
90633index 8a944f5..db5001e 100644
90634--- a/kernel/capability.c
90635+++ b/kernel/capability.c
90636@@ -305,10 +305,26 @@ int capable(int cap)
90637 BUG();
90638 }
90639
90640- if (security_capable(cap) == 0) {
90641+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
90642 current->flags |= PF_SUPERPRIV;
90643 return 1;
90644 }
90645 return 0;
90646 }
90647+
90648+int capable_nolog(int cap)
90649+{
90650+ if (unlikely(!cap_valid(cap))) {
90651+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
90652+ BUG();
90653+ }
90654+
90655+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
90656+ current->flags |= PF_SUPERPRIV;
90657+ return 1;
90658+ }
90659+ return 0;
90660+}
90661+
90662 EXPORT_SYMBOL(capable);
90663+EXPORT_SYMBOL(capable_nolog);
90664diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90665index 1fbcc74..7000012 100644
90666--- a/kernel/cgroup.c
90667+++ b/kernel/cgroup.c
90668@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
90669 struct hlist_head *hhead;
90670 struct cg_cgroup_link *link;
90671
90672+ pax_track_stack();
90673+
90674 /* First see if we already have a cgroup group that matches
90675 * the desired set */
90676 read_lock(&css_set_lock);
90677diff --git a/kernel/compat.c b/kernel/compat.c
90678index 8bc5578..186e44a 100644
90679--- a/kernel/compat.c
90680+++ b/kernel/compat.c
90681@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90682 mm_segment_t oldfs;
90683 long ret;
90684
90685- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90686+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90687 oldfs = get_fs();
90688 set_fs(KERNEL_DS);
90689 ret = hrtimer_nanosleep_restart(restart);
90690@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
90691 oldfs = get_fs();
90692 set_fs(KERNEL_DS);
90693 ret = hrtimer_nanosleep(&tu,
90694- rmtp ? (struct timespec __user *)&rmt : NULL,
90695+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90696 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90697 set_fs(oldfs);
90698
90699@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
90700 mm_segment_t old_fs = get_fs();
90701
90702 set_fs(KERNEL_DS);
90703- ret = sys_sigpending((old_sigset_t __user *) &s);
90704+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90705 set_fs(old_fs);
90706 if (ret == 0)
90707 ret = put_user(s, set);
90708@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
90709 old_fs = get_fs();
90710 set_fs(KERNEL_DS);
90711 ret = sys_sigprocmask(how,
90712- set ? (old_sigset_t __user *) &s : NULL,
90713- oset ? (old_sigset_t __user *) &s : NULL);
90714+ set ? (old_sigset_t __force_user *) &s : NULL,
90715+ oset ? (old_sigset_t __force_user *) &s : NULL);
90716 set_fs(old_fs);
90717 if (ret == 0)
90718 if (oset)
90719@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
90720 mm_segment_t old_fs = get_fs();
90721
90722 set_fs(KERNEL_DS);
90723- ret = sys_old_getrlimit(resource, &r);
90724+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90725 set_fs(old_fs);
90726
90727 if (!ret) {
90728@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
90729 mm_segment_t old_fs = get_fs();
90730
90731 set_fs(KERNEL_DS);
90732- ret = sys_getrusage(who, (struct rusage __user *) &r);
90733+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
90734 set_fs(old_fs);
90735
90736 if (ret)
90737@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
90738 set_fs (KERNEL_DS);
90739 ret = sys_wait4(pid,
90740 (stat_addr ?
90741- (unsigned int __user *) &status : NULL),
90742- options, (struct rusage __user *) &r);
90743+ (unsigned int __force_user *) &status : NULL),
90744+ options, (struct rusage __force_user *) &r);
90745 set_fs (old_fs);
90746
90747 if (ret > 0) {
90748@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
90749 memset(&info, 0, sizeof(info));
90750
90751 set_fs(KERNEL_DS);
90752- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90753- uru ? (struct rusage __user *)&ru : NULL);
90754+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90755+ uru ? (struct rusage __force_user *)&ru : NULL);
90756 set_fs(old_fs);
90757
90758 if ((ret < 0) || (info.si_signo == 0))
90759@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
90760 oldfs = get_fs();
90761 set_fs(KERNEL_DS);
90762 err = sys_timer_settime(timer_id, flags,
90763- (struct itimerspec __user *) &newts,
90764- (struct itimerspec __user *) &oldts);
90765+ (struct itimerspec __force_user *) &newts,
90766+ (struct itimerspec __force_user *) &oldts);
90767 set_fs(oldfs);
90768 if (!err && old && put_compat_itimerspec(old, &oldts))
90769 return -EFAULT;
90770@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
90771 oldfs = get_fs();
90772 set_fs(KERNEL_DS);
90773 err = sys_timer_gettime(timer_id,
90774- (struct itimerspec __user *) &ts);
90775+ (struct itimerspec __force_user *) &ts);
90776 set_fs(oldfs);
90777 if (!err && put_compat_itimerspec(setting, &ts))
90778 return -EFAULT;
90779@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
90780 oldfs = get_fs();
90781 set_fs(KERNEL_DS);
90782 err = sys_clock_settime(which_clock,
90783- (struct timespec __user *) &ts);
90784+ (struct timespec __force_user *) &ts);
90785 set_fs(oldfs);
90786 return err;
90787 }
90788@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
90789 oldfs = get_fs();
90790 set_fs(KERNEL_DS);
90791 err = sys_clock_gettime(which_clock,
90792- (struct timespec __user *) &ts);
90793+ (struct timespec __force_user *) &ts);
90794 set_fs(oldfs);
90795 if (!err && put_compat_timespec(&ts, tp))
90796 return -EFAULT;
90797@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
90798 oldfs = get_fs();
90799 set_fs(KERNEL_DS);
90800 err = sys_clock_getres(which_clock,
90801- (struct timespec __user *) &ts);
90802+ (struct timespec __force_user *) &ts);
90803 set_fs(oldfs);
90804 if (!err && tp && put_compat_timespec(&ts, tp))
90805 return -EFAULT;
90806@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90807 long err;
90808 mm_segment_t oldfs;
90809 struct timespec tu;
90810- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
90811+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90812
90813- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90814+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90815 oldfs = get_fs();
90816 set_fs(KERNEL_DS);
90817 err = clock_nanosleep_restart(restart);
90818@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
90819 oldfs = get_fs();
90820 set_fs(KERNEL_DS);
90821 err = sys_clock_nanosleep(which_clock, flags,
90822- (struct timespec __user *) &in,
90823- (struct timespec __user *) &out);
90824+ (struct timespec __force_user *) &in,
90825+ (struct timespec __force_user *) &out);
90826 set_fs(oldfs);
90827
90828 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90829diff --git a/kernel/configs.c b/kernel/configs.c
90830index abaee68..047facd 100644
90831--- a/kernel/configs.c
90832+++ b/kernel/configs.c
90833@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
90834 struct proc_dir_entry *entry;
90835
90836 /* create the current config file */
90837+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90838+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90839+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90840+ &ikconfig_file_ops);
90841+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90842+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90843+ &ikconfig_file_ops);
90844+#endif
90845+#else
90846 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90847 &ikconfig_file_ops);
90848+#endif
90849+
90850 if (!entry)
90851 return -ENOMEM;
90852
90853diff --git a/kernel/cpu.c b/kernel/cpu.c
90854index 3f2f04f..4e53ded 100644
90855--- a/kernel/cpu.c
90856+++ b/kernel/cpu.c
90857@@ -20,7 +20,7 @@
90858 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
90859 static DEFINE_MUTEX(cpu_add_remove_lock);
90860
90861-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
90862+static RAW_NOTIFIER_HEAD(cpu_chain);
90863
90864 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
90865 * Should always be manipulated under cpu_add_remove_lock
90866diff --git a/kernel/cred.c b/kernel/cred.c
90867index 0b5b5fc..f7fe51a 100644
90868--- a/kernel/cred.c
90869+++ b/kernel/cred.c
90870@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
90871 */
90872 void __put_cred(struct cred *cred)
90873 {
90874+ pax_track_stack();
90875+
90876 kdebug("__put_cred(%p{%d,%d})", cred,
90877 atomic_read(&cred->usage),
90878 read_cred_subscribers(cred));
90879@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
90880 {
90881 struct cred *cred;
90882
90883+ pax_track_stack();
90884+
90885 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
90886 atomic_read(&tsk->cred->usage),
90887 read_cred_subscribers(tsk->cred));
90888@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
90889 validate_creds(cred);
90890 put_cred(cred);
90891 }
90892+
90893+#ifdef CONFIG_GRKERNSEC_SETXID
90894+ cred = (struct cred *) tsk->delayed_cred;
90895+ if (cred) {
90896+ tsk->delayed_cred = NULL;
90897+ validate_creds(cred);
90898+ put_cred(cred);
90899+ }
90900+#endif
90901 }
90902
90903 /**
90904@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
90905 {
90906 const struct cred *cred;
90907
90908+ pax_track_stack();
90909+
90910 rcu_read_lock();
90911
90912 do {
90913@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
90914 {
90915 struct cred *new;
90916
90917+ pax_track_stack();
90918+
90919 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
90920 if (!new)
90921 return NULL;
90922@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
90923 const struct cred *old;
90924 struct cred *new;
90925
90926+ pax_track_stack();
90927+
90928 validate_process_creds();
90929
90930 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90931@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
90932 struct thread_group_cred *tgcred = NULL;
90933 struct cred *new;
90934
90935+ pax_track_stack();
90936+
90937 #ifdef CONFIG_KEYS
90938 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
90939 if (!tgcred)
90940@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
90941 struct cred *new;
90942 int ret;
90943
90944+ pax_track_stack();
90945+
90946 mutex_init(&p->cred_guard_mutex);
90947
90948 if (
90949@@ -523,11 +546,13 @@ error_put:
90950 * Always returns 0 thus allowing this function to be tail-called at the end
90951 * of, say, sys_setgid().
90952 */
90953-int commit_creds(struct cred *new)
90954+static int __commit_creds(struct cred *new)
90955 {
90956 struct task_struct *task = current;
90957 const struct cred *old = task->real_cred;
90958
90959+ pax_track_stack();
90960+
90961 kdebug("commit_creds(%p{%d,%d})", new,
90962 atomic_read(&new->usage),
90963 read_cred_subscribers(new));
90964@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
90965
90966 get_cred(new); /* we will require a ref for the subj creds too */
90967
90968+ gr_set_role_label(task, new->uid, new->gid);
90969+
90970 /* dumpability changes */
90971 if (old->euid != new->euid ||
90972 old->egid != new->egid ||
90973@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
90974 key_fsgid_changed(task);
90975
90976 /* do it
90977- * - What if a process setreuid()'s and this brings the
90978- * new uid over his NPROC rlimit? We can check this now
90979- * cheaply with the new uid cache, so if it matters
90980- * we should be checking for it. -DaveM
90981+ * RLIMIT_NPROC limits on user->processes have already been checked
90982+ * in set_user().
90983 */
90984 alter_cred_subscribers(new, 2);
90985 if (new->user != old->user)
90986@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
90987 put_cred(old);
90988 return 0;
90989 }
90990+
90991+#ifdef CONFIG_GRKERNSEC_SETXID
90992+extern int set_user(struct cred *new);
90993+
90994+void gr_delayed_cred_worker(void)
90995+{
90996+ const struct cred *new = current->delayed_cred;
90997+ struct cred *ncred;
90998+
90999+ current->delayed_cred = NULL;
91000+
91001+ if (current_uid() && new != NULL) {
91002+ // from doing get_cred on it when queueing this
91003+ put_cred(new);
91004+ return;
91005+ } else if (new == NULL)
91006+ return;
91007+
91008+ ncred = prepare_creds();
91009+ if (!ncred)
91010+ goto die;
91011+ // uids
91012+ ncred->uid = new->uid;
91013+ ncred->euid = new->euid;
91014+ ncred->suid = new->suid;
91015+ ncred->fsuid = new->fsuid;
91016+ // gids
91017+ ncred->gid = new->gid;
91018+ ncred->egid = new->egid;
91019+ ncred->sgid = new->sgid;
91020+ ncred->fsgid = new->fsgid;
91021+ // groups
91022+ if (set_groups(ncred, new->group_info) < 0) {
91023+ abort_creds(ncred);
91024+ goto die;
91025+ }
91026+ // caps
91027+ ncred->securebits = new->securebits;
91028+ ncred->cap_inheritable = new->cap_inheritable;
91029+ ncred->cap_permitted = new->cap_permitted;
91030+ ncred->cap_effective = new->cap_effective;
91031+ ncred->cap_bset = new->cap_bset;
91032+
91033+ if (set_user(ncred)) {
91034+ abort_creds(ncred);
91035+ goto die;
91036+ }
91037+
91038+ // from doing get_cred on it when queueing this
91039+ put_cred(new);
91040+
91041+ __commit_creds(ncred);
91042+ return;
91043+die:
91044+ // from doing get_cred on it when queueing this
91045+ put_cred(new);
91046+ do_group_exit(SIGKILL);
91047+}
91048+#endif
91049+
91050+int commit_creds(struct cred *new)
91051+{
91052+#ifdef CONFIG_GRKERNSEC_SETXID
91053+ struct task_struct *t;
91054+
91055+ /* we won't get called with tasklist_lock held for writing
91056+ and interrupts disabled as the cred struct in that case is
91057+ init_cred
91058+ */
91059+ if (grsec_enable_setxid && !current_is_single_threaded() &&
91060+ !current_uid() && new->uid) {
91061+ rcu_read_lock();
91062+ read_lock(&tasklist_lock);
91063+ for (t = next_thread(current); t != current;
91064+ t = next_thread(t)) {
91065+ if (t->delayed_cred == NULL) {
91066+ t->delayed_cred = get_cred(new);
91067+ set_tsk_need_resched(t);
91068+ }
91069+ }
91070+ read_unlock(&tasklist_lock);
91071+ rcu_read_unlock();
91072+ }
91073+#endif
91074+ return __commit_creds(new);
91075+}
91076+
91077 EXPORT_SYMBOL(commit_creds);
91078
91079+
91080 /**
91081 * abort_creds - Discard a set of credentials and unlock the current task
91082 * @new: The credentials that were going to be applied
91083@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
91084 */
91085 void abort_creds(struct cred *new)
91086 {
91087+ pax_track_stack();
91088+
91089 kdebug("abort_creds(%p{%d,%d})", new,
91090 atomic_read(&new->usage),
91091 read_cred_subscribers(new));
91092@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
91093 {
91094 const struct cred *old = current->cred;
91095
91096+ pax_track_stack();
91097+
91098 kdebug("override_creds(%p{%d,%d})", new,
91099 atomic_read(&new->usage),
91100 read_cred_subscribers(new));
91101@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
91102 {
91103 const struct cred *override = current->cred;
91104
91105+ pax_track_stack();
91106+
91107 kdebug("revert_creds(%p{%d,%d})", old,
91108 atomic_read(&old->usage),
91109 read_cred_subscribers(old));
91110@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
91111 const struct cred *old;
91112 struct cred *new;
91113
91114+ pax_track_stack();
91115+
91116 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
91117 if (!new)
91118 return NULL;
91119@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
91120 */
91121 int set_security_override(struct cred *new, u32 secid)
91122 {
91123+ pax_track_stack();
91124+
91125 return security_kernel_act_as(new, secid);
91126 }
91127 EXPORT_SYMBOL(set_security_override);
91128@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
91129 u32 secid;
91130 int ret;
91131
91132+ pax_track_stack();
91133+
91134 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
91135 if (ret < 0)
91136 return ret;
91137diff --git a/kernel/exit.c b/kernel/exit.c
91138index 0f8fae3..66af9b1 100644
91139--- a/kernel/exit.c
91140+++ b/kernel/exit.c
91141@@ -55,6 +55,10 @@
91142 #include <asm/pgtable.h>
91143 #include <asm/mmu_context.h>
91144
91145+#ifdef CONFIG_GRKERNSEC
91146+extern rwlock_t grsec_exec_file_lock;
91147+#endif
91148+
91149 static void exit_mm(struct task_struct * tsk);
91150
91151 static void __unhash_process(struct task_struct *p)
91152@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
91153 struct task_struct *leader;
91154 int zap_leader;
91155 repeat:
91156+#ifdef CONFIG_NET
91157+ gr_del_task_from_ip_table(p);
91158+#endif
91159+
91160 tracehook_prepare_release_task(p);
91161 /* don't need to get the RCU readlock here - the process is dead and
91162 * can't be modifying its own credentials */
91163@@ -397,7 +405,7 @@ int allow_signal(int sig)
91164 * know it'll be handled, so that they don't get converted to
91165 * SIGKILL or just silently dropped.
91166 */
91167- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
91168+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
91169 recalc_sigpending();
91170 spin_unlock_irq(&current->sighand->siglock);
91171 return 0;
91172@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
91173 vsnprintf(current->comm, sizeof(current->comm), name, args);
91174 va_end(args);
91175
91176+#ifdef CONFIG_GRKERNSEC
91177+ write_lock(&grsec_exec_file_lock);
91178+ if (current->exec_file) {
91179+ fput(current->exec_file);
91180+ current->exec_file = NULL;
91181+ }
91182+ write_unlock(&grsec_exec_file_lock);
91183+#endif
91184+
91185+ gr_set_kernel_label(current);
91186+
91187 /*
91188 * If we were started as result of loading a module, close all of the
91189 * user space pages. We don't need them, and if we didn't close them
91190@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
91191 struct task_struct *tsk = current;
91192 int group_dead;
91193
91194- profile_task_exit(tsk);
91195-
91196- WARN_ON(atomic_read(&tsk->fs_excl));
91197-
91198+ /*
91199+ * Check this first since set_fs() below depends on
91200+ * current_thread_info(), which we better not access when we're in
91201+ * interrupt context. Other than that, we want to do the set_fs()
91202+ * as early as possible.
91203+ */
91204 if (unlikely(in_interrupt()))
91205 panic("Aiee, killing interrupt handler!");
91206- if (unlikely(!tsk->pid))
91207- panic("Attempted to kill the idle task!");
91208
91209 /*
91210- * If do_exit is called because this processes oopsed, it's possible
91211+ * If do_exit is called because this processes Oops'ed, it's possible
91212 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
91213 * continuing. Amongst other possible reasons, this is to prevent
91214 * mm_release()->clear_child_tid() from writing to a user-controlled
91215@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
91216 */
91217 set_fs(USER_DS);
91218
91219+ profile_task_exit(tsk);
91220+
91221+ WARN_ON(atomic_read(&tsk->fs_excl));
91222+
91223+ if (unlikely(!tsk->pid))
91224+ panic("Attempted to kill the idle task!");
91225+
91226 tracehook_report_exit(&code);
91227
91228 validate_creds_for_do_exit(tsk);
91229@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
91230 tsk->exit_code = code;
91231 taskstats_exit(tsk, group_dead);
91232
91233+ gr_acl_handle_psacct(tsk, code);
91234+ gr_acl_handle_exit();
91235+
91236 exit_mm(tsk);
91237
91238 if (group_dead)
91239@@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
91240 tsk->flags |= PF_EXITPIDONE;
91241
91242 if (tsk->io_context)
91243- exit_io_context();
91244+ exit_io_context(tsk);
91245
91246 if (tsk->splice_pipe)
91247 __free_pipe_info(tsk->splice_pipe);
91248@@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
91249 * Take down every thread in the group. This is called by fatal signals
91250 * as well as by sys_exit_group (below).
91251 */
91252-NORET_TYPE void
91253+__noreturn void
91254 do_group_exit(int exit_code)
91255 {
91256 struct signal_struct *sig = current->signal;
91257@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
91258
91259 if (unlikely(wo->wo_flags & WNOWAIT)) {
91260 int exit_code = p->exit_code;
91261- int why, status;
91262+ int why;
91263
91264 get_task_struct(p);
91265 read_unlock(&tasklist_lock);
91266diff --git a/kernel/fork.c b/kernel/fork.c
91267index 4bde56f..8976a8f 100644
91268--- a/kernel/fork.c
91269+++ b/kernel/fork.c
91270@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91271 *stackend = STACK_END_MAGIC; /* for overflow detection */
91272
91273 #ifdef CONFIG_CC_STACKPROTECTOR
91274- tsk->stack_canary = get_random_int();
91275+ tsk->stack_canary = pax_get_random_long();
91276 #endif
91277
91278 /* One for us, one for whoever does the "release_task()" (usually parent) */
91279@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91280 mm->locked_vm = 0;
91281 mm->mmap = NULL;
91282 mm->mmap_cache = NULL;
91283- mm->free_area_cache = oldmm->mmap_base;
91284- mm->cached_hole_size = ~0UL;
91285+ mm->free_area_cache = oldmm->free_area_cache;
91286+ mm->cached_hole_size = oldmm->cached_hole_size;
91287 mm->map_count = 0;
91288 cpumask_clear(mm_cpumask(mm));
91289 mm->mm_rb = RB_ROOT;
91290@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91291 tmp->vm_flags &= ~VM_LOCKED;
91292 tmp->vm_mm = mm;
91293 tmp->vm_next = tmp->vm_prev = NULL;
91294+ tmp->vm_mirror = NULL;
91295 anon_vma_link(tmp);
91296 file = tmp->vm_file;
91297 if (file) {
91298@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91299 if (retval)
91300 goto out;
91301 }
91302+
91303+#ifdef CONFIG_PAX_SEGMEXEC
91304+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
91305+ struct vm_area_struct *mpnt_m;
91306+
91307+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
91308+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
91309+
91310+ if (!mpnt->vm_mirror)
91311+ continue;
91312+
91313+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
91314+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
91315+ mpnt->vm_mirror = mpnt_m;
91316+ } else {
91317+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
91318+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
91319+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
91320+ mpnt->vm_mirror->vm_mirror = mpnt;
91321+ }
91322+ }
91323+ BUG_ON(mpnt_m);
91324+ }
91325+#endif
91326+
91327 /* a new mm has just been created */
91328 arch_dup_mmap(oldmm, mm);
91329 retval = 0;
91330@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91331 write_unlock(&fs->lock);
91332 return -EAGAIN;
91333 }
91334- fs->users++;
91335+ atomic_inc(&fs->users);
91336 write_unlock(&fs->lock);
91337 return 0;
91338 }
91339 tsk->fs = copy_fs_struct(fs);
91340 if (!tsk->fs)
91341 return -ENOMEM;
91342+ gr_set_chroot_entries(tsk, &tsk->fs->root);
91343 return 0;
91344 }
91345
91346@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91347 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91348 #endif
91349 retval = -EAGAIN;
91350+
91351+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91352+
91353 if (atomic_read(&p->real_cred->user->processes) >=
91354 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
91355- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
91356- p->real_cred->user != INIT_USER)
91357+ if (p->real_cred->user != INIT_USER &&
91358+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
91359 goto bad_fork_free;
91360 }
91361+ current->flags &= ~PF_NPROC_EXCEEDED;
91362
91363 retval = copy_creds(p, clone_flags);
91364 if (retval < 0)
91365@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91366 goto bad_fork_free_pid;
91367 }
91368
91369+ gr_copy_label(p);
91370+
91371 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
91372 /*
91373 * Clear TID on mm_release()?
91374@@ -1299,7 +1332,8 @@ bad_fork_free_pid:
91375 if (pid != &init_struct_pid)
91376 free_pid(pid);
91377 bad_fork_cleanup_io:
91378- put_io_context(p->io_context);
91379+ if (p->io_context)
91380+ exit_io_context(p);
91381 bad_fork_cleanup_namespaces:
91382 exit_task_namespaces(p);
91383 bad_fork_cleanup_mm:
91384@@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
91385 bad_fork_free:
91386 free_task(p);
91387 fork_out:
91388+ gr_log_forkfail(retval);
91389+
91390 return ERR_PTR(retval);
91391 }
91392
91393@@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
91394 if (clone_flags & CLONE_PARENT_SETTID)
91395 put_user(nr, parent_tidptr);
91396
91397+ gr_handle_brute_check();
91398+
91399 if (clone_flags & CLONE_VFORK) {
91400 p->vfork_done = &vfork;
91401 init_completion(&vfork);
91402@@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91403 return 0;
91404
91405 /* don't need lock here; in the worst case we'll do useless copy */
91406- if (fs->users == 1)
91407+ if (atomic_read(&fs->users) == 1)
91408 return 0;
91409
91410 *new_fsp = copy_fs_struct(fs);
91411@@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91412 fs = current->fs;
91413 write_lock(&fs->lock);
91414 current->fs = new_fs;
91415- if (--fs->users)
91416+ gr_set_chroot_entries(current, &current->fs->root);
91417+ if (atomic_dec_return(&fs->users))
91418 new_fs = NULL;
91419 else
91420 new_fs = fs;
91421diff --git a/kernel/futex.c b/kernel/futex.c
91422index fb98c9f..333faec 100644
91423--- a/kernel/futex.c
91424+++ b/kernel/futex.c
91425@@ -54,6 +54,7 @@
91426 #include <linux/mount.h>
91427 #include <linux/pagemap.h>
91428 #include <linux/syscalls.h>
91429+#include <linux/ptrace.h>
91430 #include <linux/signal.h>
91431 #include <linux/module.h>
91432 #include <linux/magic.h>
91433@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91434 struct page *page;
91435 int err, ro = 0;
91436
91437+#ifdef CONFIG_PAX_SEGMEXEC
91438+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91439+ return -EFAULT;
91440+#endif
91441+
91442 /*
91443 * The futex address must be "naturally" aligned.
91444 */
91445@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
91446 struct futex_q q;
91447 int ret;
91448
91449+ pax_track_stack();
91450+
91451 if (!bitset)
91452 return -EINVAL;
91453
91454@@ -1871,7 +1879,7 @@ retry:
91455
91456 restart = &current_thread_info()->restart_block;
91457 restart->fn = futex_wait_restart;
91458- restart->futex.uaddr = (u32 *)uaddr;
91459+ restart->futex.uaddr = uaddr;
91460 restart->futex.val = val;
91461 restart->futex.time = abs_time->tv64;
91462 restart->futex.bitset = bitset;
91463@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
91464 struct futex_q q;
91465 int res, ret;
91466
91467+ pax_track_stack();
91468+
91469 if (!bitset)
91470 return -EINVAL;
91471
91472@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
91473 if (!p)
91474 goto err_unlock;
91475 ret = -EPERM;
91476+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91477+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
91478+ goto err_unlock;
91479+#endif
91480 pcred = __task_cred(p);
91481 if (cred->euid != pcred->euid &&
91482 cred->euid != pcred->uid &&
91483@@ -2489,7 +2503,7 @@ retry:
91484 */
91485 static inline int fetch_robust_entry(struct robust_list __user **entry,
91486 struct robust_list __user * __user *head,
91487- int *pi)
91488+ unsigned int *pi)
91489 {
91490 unsigned long uentry;
91491
91492@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
91493 {
91494 u32 curval;
91495 int i;
91496+ mm_segment_t oldfs;
91497
91498 /*
91499 * This will fail and we want it. Some arch implementations do
91500@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
91501 * implementation, the non functional ones will return
91502 * -ENOSYS.
91503 */
91504+ oldfs = get_fs();
91505+ set_fs(USER_DS);
91506 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
91507+ set_fs(oldfs);
91508 if (curval == -EFAULT)
91509 futex_cmpxchg_enabled = 1;
91510
91511diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91512index 2357165..eb25501 100644
91513--- a/kernel/futex_compat.c
91514+++ b/kernel/futex_compat.c
91515@@ -10,6 +10,7 @@
91516 #include <linux/compat.h>
91517 #include <linux/nsproxy.h>
91518 #include <linux/futex.h>
91519+#include <linux/ptrace.h>
91520
91521 #include <asm/uaccess.h>
91522
91523@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91524 {
91525 struct compat_robust_list_head __user *head;
91526 unsigned long ret;
91527- const struct cred *cred = current_cred(), *pcred;
91528+ const struct cred *cred = current_cred();
91529+ const struct cred *pcred;
91530
91531 if (!futex_cmpxchg_enabled)
91532 return -ENOSYS;
91533@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91534 if (!p)
91535 goto err_unlock;
91536 ret = -EPERM;
91537+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91538+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
91539+ goto err_unlock;
91540+#endif
91541 pcred = __task_cred(p);
91542 if (cred->euid != pcred->euid &&
91543 cred->euid != pcred->uid &&
91544diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91545index 9b22d03..6295b62 100644
91546--- a/kernel/gcov/base.c
91547+++ b/kernel/gcov/base.c
91548@@ -102,11 +102,6 @@ void gcov_enable_events(void)
91549 }
91550
91551 #ifdef CONFIG_MODULES
91552-static inline int within(void *addr, void *start, unsigned long size)
91553-{
91554- return ((addr >= start) && (addr < start + size));
91555-}
91556-
91557 /* Update list and generate events when modules are unloaded. */
91558 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91559 void *data)
91560@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91561 prev = NULL;
91562 /* Remove entries located in module from linked list. */
91563 for (info = gcov_info_head; info; info = info->next) {
91564- if (within(info, mod->module_core, mod->core_size)) {
91565+ if (within_module_core_rw((unsigned long)info, mod)) {
91566 if (prev)
91567 prev->next = info->next;
91568 else
91569diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91570index a6e9d00..a0da4f9 100644
91571--- a/kernel/hrtimer.c
91572+++ b/kernel/hrtimer.c
91573@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
91574 local_irq_restore(flags);
91575 }
91576
91577-static void run_hrtimer_softirq(struct softirq_action *h)
91578+static void run_hrtimer_softirq(void)
91579 {
91580 hrtimer_peek_ahead_timers();
91581 }
91582diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91583index 8b6b8b6..6bc87df 100644
91584--- a/kernel/kallsyms.c
91585+++ b/kernel/kallsyms.c
91586@@ -11,6 +11,9 @@
91587 * Changed the compression method from stem compression to "table lookup"
91588 * compression (see scripts/kallsyms.c for a more complete description)
91589 */
91590+#ifdef CONFIG_GRKERNSEC_HIDESYM
91591+#define __INCLUDED_BY_HIDESYM 1
91592+#endif
91593 #include <linux/kallsyms.h>
91594 #include <linux/module.h>
91595 #include <linux/init.h>
91596@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
91597
91598 static inline int is_kernel_inittext(unsigned long addr)
91599 {
91600+ if (system_state != SYSTEM_BOOTING)
91601+ return 0;
91602+
91603 if (addr >= (unsigned long)_sinittext
91604 && addr <= (unsigned long)_einittext)
91605 return 1;
91606 return 0;
91607 }
91608
91609+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91610+#ifdef CONFIG_MODULES
91611+static inline int is_module_text(unsigned long addr)
91612+{
91613+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91614+ return 1;
91615+
91616+ addr = ktla_ktva(addr);
91617+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91618+}
91619+#else
91620+static inline int is_module_text(unsigned long addr)
91621+{
91622+ return 0;
91623+}
91624+#endif
91625+#endif
91626+
91627 static inline int is_kernel_text(unsigned long addr)
91628 {
91629 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91630@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
91631
91632 static inline int is_kernel(unsigned long addr)
91633 {
91634+
91635+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91636+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91637+ return 1;
91638+
91639+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91640+#else
91641 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91642+#endif
91643+
91644 return 1;
91645 return in_gate_area_no_task(addr);
91646 }
91647
91648 static int is_ksym_addr(unsigned long addr)
91649 {
91650+
91651+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91652+ if (is_module_text(addr))
91653+ return 0;
91654+#endif
91655+
91656 if (all_var)
91657 return is_kernel(addr);
91658
91659@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91660
91661 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91662 {
91663- iter->name[0] = '\0';
91664 iter->nameoff = get_symbol_offset(new_pos);
91665 iter->pos = new_pos;
91666 }
91667@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
91668 {
91669 struct kallsym_iter *iter = m->private;
91670
91671+#ifdef CONFIG_GRKERNSEC_HIDESYM
91672+ if (current_uid())
91673+ return 0;
91674+#endif
91675+
91676 /* Some debugging symbols have no name. Ignore them. */
91677 if (!iter->name[0])
91678 return 0;
91679@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91680 struct kallsym_iter *iter;
91681 int ret;
91682
91683- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91684+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91685 if (!iter)
91686 return -ENOMEM;
91687 reset_iter(iter, 0);
91688diff --git a/kernel/kexec.c b/kernel/kexec.c
91689index f336e21..9c1c20b 100644
91690--- a/kernel/kexec.c
91691+++ b/kernel/kexec.c
91692@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
91693 unsigned long flags)
91694 {
91695 struct compat_kexec_segment in;
91696- struct kexec_segment out, __user *ksegments;
91697+ struct kexec_segment out;
91698+ struct kexec_segment __user *ksegments;
91699 unsigned long i, result;
91700
91701 /* Don't allow clients that don't understand the native
91702diff --git a/kernel/kgdb.c b/kernel/kgdb.c
91703index 53dae4b..9ba3743 100644
91704--- a/kernel/kgdb.c
91705+++ b/kernel/kgdb.c
91706@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
91707 /* Guard for recursive entry */
91708 static int exception_level;
91709
91710-static struct kgdb_io *kgdb_io_ops;
91711+static const struct kgdb_io *kgdb_io_ops;
91712 static DEFINE_SPINLOCK(kgdb_registration_lock);
91713
91714 /* kgdb console driver is loaded */
91715@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
91716 */
91717 static atomic_t passive_cpu_wait[NR_CPUS];
91718 static atomic_t cpu_in_kgdb[NR_CPUS];
91719-atomic_t kgdb_setting_breakpoint;
91720+atomic_unchecked_t kgdb_setting_breakpoint;
91721
91722 struct task_struct *kgdb_usethread;
91723 struct task_struct *kgdb_contthread;
91724@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
91725 sizeof(unsigned long)];
91726
91727 /* to keep track of the CPU which is doing the single stepping*/
91728-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91729+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91730
91731 /*
91732 * If you are debugging a problem where roundup (the collection of
91733@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
91734 return 0;
91735 if (kgdb_connected)
91736 return 1;
91737- if (atomic_read(&kgdb_setting_breakpoint))
91738+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
91739 return 1;
91740 if (print_wait)
91741 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
91742@@ -1426,8 +1426,8 @@ acquirelock:
91743 * instance of the exception handler wanted to come into the
91744 * debugger on a different CPU via a single step
91745 */
91746- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
91747- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
91748+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
91749+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
91750
91751 atomic_set(&kgdb_active, -1);
91752 touch_softlockup_watchdog();
91753@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
91754 *
91755 * Register it with the KGDB core.
91756 */
91757-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
91758+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
91759 {
91760 int err;
91761
91762@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
91763 *
91764 * Unregister it with the KGDB core.
91765 */
91766-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
91767+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
91768 {
91769 BUG_ON(kgdb_connected);
91770
91771@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
91772 */
91773 void kgdb_breakpoint(void)
91774 {
91775- atomic_set(&kgdb_setting_breakpoint, 1);
91776+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
91777 wmb(); /* Sync point before breakpoint */
91778 arch_kgdb_breakpoint();
91779 wmb(); /* Sync point after breakpoint */
91780- atomic_set(&kgdb_setting_breakpoint, 0);
91781+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
91782 }
91783 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
91784
91785diff --git a/kernel/kmod.c b/kernel/kmod.c
91786index a061472..40884b6 100644
91787--- a/kernel/kmod.c
91788+++ b/kernel/kmod.c
91789@@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
91790 * If module auto-loading support is disabled then this function
91791 * becomes a no-operation.
91792 */
91793-int __request_module(bool wait, const char *fmt, ...)
91794+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91795 {
91796- va_list args;
91797 char module_name[MODULE_NAME_LEN];
91798 unsigned int max_modprobes;
91799 int ret;
91800- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
91801+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
91802 static char *envp[] = { "HOME=/",
91803 "TERM=linux",
91804 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
91805@@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
91806 if (ret)
91807 return ret;
91808
91809- va_start(args, fmt);
91810- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91811- va_end(args);
91812+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91813 if (ret >= MODULE_NAME_LEN)
91814 return -ENAMETOOLONG;
91815
91816+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91817+ if (!current_uid()) {
91818+ /* hack to workaround consolekit/udisks stupidity */
91819+ read_lock(&tasklist_lock);
91820+ if (!strcmp(current->comm, "mount") &&
91821+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91822+ read_unlock(&tasklist_lock);
91823+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91824+ return -EPERM;
91825+ }
91826+ read_unlock(&tasklist_lock);
91827+ }
91828+#endif
91829+
91830 /* If modprobe needs a service that is in a module, we get a recursive
91831 * loop. Limit the number of running kmod threads to max_threads/2 or
91832 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91833@@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
91834 atomic_dec(&kmod_concurrent);
91835 return ret;
91836 }
91837+
91838+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91839+{
91840+ va_list args;
91841+ int ret;
91842+
91843+ va_start(args, fmt);
91844+ ret = ____request_module(wait, module_param, fmt, args);
91845+ va_end(args);
91846+
91847+ return ret;
91848+}
91849+
91850+int __request_module(bool wait, const char *fmt, ...)
91851+{
91852+ va_list args;
91853+ int ret;
91854+
91855+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91856+ if (current_uid()) {
91857+ char module_param[MODULE_NAME_LEN];
91858+
91859+ memset(module_param, 0, sizeof(module_param));
91860+
91861+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
91862+
91863+ va_start(args, fmt);
91864+ ret = ____request_module(wait, module_param, fmt, args);
91865+ va_end(args);
91866+
91867+ return ret;
91868+ }
91869+#endif
91870+
91871+ va_start(args, fmt);
91872+ ret = ____request_module(wait, NULL, fmt, args);
91873+ va_end(args);
91874+
91875+ return ret;
91876+}
91877+
91878+
91879 EXPORT_SYMBOL(__request_module);
91880 #endif /* CONFIG_MODULES */
91881
91882@@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
91883 *
91884 * Thus the __user pointer cast is valid here.
91885 */
91886- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91887+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91888
91889 /*
91890 * If ret is 0, either ____call_usermodehelper failed and the
91891diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91892index 176d825..77fa8ea 100644
91893--- a/kernel/kprobes.c
91894+++ b/kernel/kprobes.c
91895@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
91896 * kernel image and loaded module images reside. This is required
91897 * so x86_64 can correctly handle the %rip-relative fixups.
91898 */
91899- kip->insns = module_alloc(PAGE_SIZE);
91900+ kip->insns = module_alloc_exec(PAGE_SIZE);
91901 if (!kip->insns) {
91902 kfree(kip);
91903 return NULL;
91904@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
91905 */
91906 if (!list_is_singular(&kprobe_insn_pages)) {
91907 list_del(&kip->list);
91908- module_free(NULL, kip->insns);
91909+ module_free_exec(NULL, kip->insns);
91910 kfree(kip);
91911 }
91912 return 1;
91913@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
91914 {
91915 int i, err = 0;
91916 unsigned long offset = 0, size = 0;
91917- char *modname, namebuf[128];
91918+ char *modname, namebuf[KSYM_NAME_LEN];
91919 const char *symbol_name;
91920 void *addr;
91921 struct kprobe_blackpoint *kb;
91922@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
91923 const char *sym = NULL;
91924 unsigned int i = *(loff_t *) v;
91925 unsigned long offset = 0;
91926- char *modname, namebuf[128];
91927+ char *modname, namebuf[KSYM_NAME_LEN];
91928
91929 head = &kprobe_table[i];
91930 preempt_disable();
91931diff --git a/kernel/lockdep.c b/kernel/lockdep.c
91932index d86fe89..d12fc66 100644
91933--- a/kernel/lockdep.c
91934+++ b/kernel/lockdep.c
91935@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
91936 /*
91937 * Various lockdep statistics:
91938 */
91939-atomic_t chain_lookup_hits;
91940-atomic_t chain_lookup_misses;
91941-atomic_t hardirqs_on_events;
91942-atomic_t hardirqs_off_events;
91943-atomic_t redundant_hardirqs_on;
91944-atomic_t redundant_hardirqs_off;
91945-atomic_t softirqs_on_events;
91946-atomic_t softirqs_off_events;
91947-atomic_t redundant_softirqs_on;
91948-atomic_t redundant_softirqs_off;
91949-atomic_t nr_unused_locks;
91950-atomic_t nr_cyclic_checks;
91951-atomic_t nr_find_usage_forwards_checks;
91952-atomic_t nr_find_usage_backwards_checks;
91953+atomic_unchecked_t chain_lookup_hits;
91954+atomic_unchecked_t chain_lookup_misses;
91955+atomic_unchecked_t hardirqs_on_events;
91956+atomic_unchecked_t hardirqs_off_events;
91957+atomic_unchecked_t redundant_hardirqs_on;
91958+atomic_unchecked_t redundant_hardirqs_off;
91959+atomic_unchecked_t softirqs_on_events;
91960+atomic_unchecked_t softirqs_off_events;
91961+atomic_unchecked_t redundant_softirqs_on;
91962+atomic_unchecked_t redundant_softirqs_off;
91963+atomic_unchecked_t nr_unused_locks;
91964+atomic_unchecked_t nr_cyclic_checks;
91965+atomic_unchecked_t nr_find_usage_forwards_checks;
91966+atomic_unchecked_t nr_find_usage_backwards_checks;
91967 #endif
91968
91969 /*
91970@@ -577,6 +577,10 @@ static int static_obj(void *obj)
91971 int i;
91972 #endif
91973
91974+#ifdef CONFIG_PAX_KERNEXEC
91975+ start = ktla_ktva(start);
91976+#endif
91977+
91978 /*
91979 * static variable?
91980 */
91981@@ -592,8 +596,7 @@ static int static_obj(void *obj)
91982 */
91983 for_each_possible_cpu(i) {
91984 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
91985- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
91986- + per_cpu_offset(i);
91987+ end = start + PERCPU_ENOUGH_ROOM;
91988
91989 if ((addr >= start) && (addr < end))
91990 return 1;
91991@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91992 if (!static_obj(lock->key)) {
91993 debug_locks_off();
91994 printk("INFO: trying to register non-static key.\n");
91995+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91996 printk("the code is fine but needs lockdep annotation.\n");
91997 printk("turning off the locking correctness validator.\n");
91998 dump_stack();
91999@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
92000 if (!class)
92001 return 0;
92002 }
92003- debug_atomic_inc((atomic_t *)&class->ops);
92004+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
92005 if (very_verbose(class)) {
92006 printk("\nacquire class [%p] %s", class->key, class->name);
92007 if (class->name_version > 1)
92008diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
92009index a2ee95a..092f0f2 100644
92010--- a/kernel/lockdep_internals.h
92011+++ b/kernel/lockdep_internals.h
92012@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
92013 /*
92014 * Various lockdep statistics:
92015 */
92016-extern atomic_t chain_lookup_hits;
92017-extern atomic_t chain_lookup_misses;
92018-extern atomic_t hardirqs_on_events;
92019-extern atomic_t hardirqs_off_events;
92020-extern atomic_t redundant_hardirqs_on;
92021-extern atomic_t redundant_hardirqs_off;
92022-extern atomic_t softirqs_on_events;
92023-extern atomic_t softirqs_off_events;
92024-extern atomic_t redundant_softirqs_on;
92025-extern atomic_t redundant_softirqs_off;
92026-extern atomic_t nr_unused_locks;
92027-extern atomic_t nr_cyclic_checks;
92028-extern atomic_t nr_cyclic_check_recursions;
92029-extern atomic_t nr_find_usage_forwards_checks;
92030-extern atomic_t nr_find_usage_forwards_recursions;
92031-extern atomic_t nr_find_usage_backwards_checks;
92032-extern atomic_t nr_find_usage_backwards_recursions;
92033-# define debug_atomic_inc(ptr) atomic_inc(ptr)
92034-# define debug_atomic_dec(ptr) atomic_dec(ptr)
92035-# define debug_atomic_read(ptr) atomic_read(ptr)
92036+extern atomic_unchecked_t chain_lookup_hits;
92037+extern atomic_unchecked_t chain_lookup_misses;
92038+extern atomic_unchecked_t hardirqs_on_events;
92039+extern atomic_unchecked_t hardirqs_off_events;
92040+extern atomic_unchecked_t redundant_hardirqs_on;
92041+extern atomic_unchecked_t redundant_hardirqs_off;
92042+extern atomic_unchecked_t softirqs_on_events;
92043+extern atomic_unchecked_t softirqs_off_events;
92044+extern atomic_unchecked_t redundant_softirqs_on;
92045+extern atomic_unchecked_t redundant_softirqs_off;
92046+extern atomic_unchecked_t nr_unused_locks;
92047+extern atomic_unchecked_t nr_cyclic_checks;
92048+extern atomic_unchecked_t nr_cyclic_check_recursions;
92049+extern atomic_unchecked_t nr_find_usage_forwards_checks;
92050+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
92051+extern atomic_unchecked_t nr_find_usage_backwards_checks;
92052+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
92053+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
92054+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
92055+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
92056 #else
92057 # define debug_atomic_inc(ptr) do { } while (0)
92058 # define debug_atomic_dec(ptr) do { } while (0)
92059diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
92060index d4aba4f..02a353f 100644
92061--- a/kernel/lockdep_proc.c
92062+++ b/kernel/lockdep_proc.c
92063@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
92064
92065 static void print_name(struct seq_file *m, struct lock_class *class)
92066 {
92067- char str[128];
92068+ char str[KSYM_NAME_LEN];
92069 const char *name = class->name;
92070
92071 if (!name) {
92072diff --git a/kernel/module.c b/kernel/module.c
92073index 4b270e6..2efdb65 100644
92074--- a/kernel/module.c
92075+++ b/kernel/module.c
92076@@ -55,6 +55,7 @@
92077 #include <linux/async.h>
92078 #include <linux/percpu.h>
92079 #include <linux/kmemleak.h>
92080+#include <linux/grsecurity.h>
92081
92082 #define CREATE_TRACE_POINTS
92083 #include <trace/events/module.h>
92084@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
92085 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
92086
92087 /* Bounds of module allocation, for speeding __module_address */
92088-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
92089+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
92090+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
92091
92092 int register_module_notifier(struct notifier_block * nb)
92093 {
92094@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
92095 return true;
92096
92097 list_for_each_entry_rcu(mod, &modules, list) {
92098- struct symsearch arr[] = {
92099+ struct symsearch modarr[] = {
92100 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
92101 NOT_GPL_ONLY, false },
92102 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
92103@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
92104 #endif
92105 };
92106
92107- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
92108+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
92109 return true;
92110 }
92111 return false;
92112@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
92113 void *ptr;
92114 int cpu;
92115
92116- if (align > PAGE_SIZE) {
92117+ if (align-1 >= PAGE_SIZE) {
92118 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
92119 name, align, PAGE_SIZE);
92120 align = PAGE_SIZE;
92121@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
92122 * /sys/module/foo/sections stuff
92123 * J. Corbet <corbet@lwn.net>
92124 */
92125-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
92126+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92127
92128 static inline bool sect_empty(const Elf_Shdr *sect)
92129 {
92130@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
92131 destroy_params(mod->kp, mod->num_kp);
92132
92133 /* This may be NULL, but that's OK */
92134- module_free(mod, mod->module_init);
92135+ module_free(mod, mod->module_init_rw);
92136+ module_free_exec(mod, mod->module_init_rx);
92137 kfree(mod->args);
92138 if (mod->percpu)
92139 percpu_modfree(mod->percpu);
92140@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
92141 percpu_modfree(mod->refptr);
92142 #endif
92143 /* Free lock-classes: */
92144- lockdep_free_key_range(mod->module_core, mod->core_size);
92145+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92146+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92147
92148 /* Finally, free the core (containing the module structure) */
92149- module_free(mod, mod->module_core);
92150+ module_free_exec(mod, mod->module_core_rx);
92151+ module_free(mod, mod->module_core_rw);
92152
92153 #ifdef CONFIG_MPU
92154 update_protections(current->mm);
92155@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92156 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92157 int ret = 0;
92158 const struct kernel_symbol *ksym;
92159+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92160+ int is_fs_load = 0;
92161+ int register_filesystem_found = 0;
92162+ char *p;
92163+
92164+ p = strstr(mod->args, "grsec_modharden_fs");
92165+
92166+ if (p) {
92167+ char *endptr = p + strlen("grsec_modharden_fs");
92168+ /* copy \0 as well */
92169+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92170+ is_fs_load = 1;
92171+ }
92172+#endif
92173+
92174
92175 for (i = 1; i < n; i++) {
92176+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92177+ const char *name = strtab + sym[i].st_name;
92178+
92179+ /* it's a real shame this will never get ripped and copied
92180+ upstream! ;(
92181+ */
92182+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92183+ register_filesystem_found = 1;
92184+#endif
92185 switch (sym[i].st_shndx) {
92186 case SHN_COMMON:
92187 /* We compiled with -fno-common. These are not
92188@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92189 strtab + sym[i].st_name, mod);
92190 /* Ok if resolved. */
92191 if (ksym) {
92192+ pax_open_kernel();
92193 sym[i].st_value = ksym->value;
92194+ pax_close_kernel();
92195 break;
92196 }
92197
92198@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92199 secbase = (unsigned long)mod->percpu;
92200 else
92201 secbase = sechdrs[sym[i].st_shndx].sh_addr;
92202+ pax_open_kernel();
92203 sym[i].st_value += secbase;
92204+ pax_close_kernel();
92205 break;
92206 }
92207 }
92208
92209+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92210+ if (is_fs_load && !register_filesystem_found) {
92211+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92212+ ret = -EPERM;
92213+ }
92214+#endif
92215+
92216 return ret;
92217 }
92218
92219@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
92220 || s->sh_entsize != ~0UL
92221 || strstarts(secstrings + s->sh_name, ".init"))
92222 continue;
92223- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92224+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92225+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92226+ else
92227+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92228 DEBUGP("\t%s\n", secstrings + s->sh_name);
92229 }
92230- if (m == 0)
92231- mod->core_text_size = mod->core_size;
92232 }
92233
92234 DEBUGP("Init section allocation order:\n");
92235@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
92236 || s->sh_entsize != ~0UL
92237 || !strstarts(secstrings + s->sh_name, ".init"))
92238 continue;
92239- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92240- | INIT_OFFSET_MASK);
92241+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92242+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92243+ else
92244+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92245+ s->sh_entsize |= INIT_OFFSET_MASK;
92246 DEBUGP("\t%s\n", secstrings + s->sh_name);
92247 }
92248- if (m == 0)
92249- mod->init_text_size = mod->init_size;
92250 }
92251 }
92252
92253@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
92254
92255 /* As per nm */
92256 static char elf_type(const Elf_Sym *sym,
92257- Elf_Shdr *sechdrs,
92258- const char *secstrings,
92259- struct module *mod)
92260+ const Elf_Shdr *sechdrs,
92261+ const char *secstrings)
92262 {
92263 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
92264 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
92265@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
92266
92267 /* Put symbol section at end of init part of module. */
92268 symsect->sh_flags |= SHF_ALLOC;
92269- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92270+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92271 symindex) | INIT_OFFSET_MASK;
92272 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
92273
92274@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
92275 }
92276
92277 /* Append room for core symbols at end of core part. */
92278- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92279- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
92280+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92281+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
92282
92283 /* Put string table section at end of init part of module. */
92284 strsect->sh_flags |= SHF_ALLOC;
92285- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92286+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92287 strindex) | INIT_OFFSET_MASK;
92288 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
92289
92290 /* Append room for core symbols' strings at end of core part. */
92291- *pstroffs = mod->core_size;
92292+ *pstroffs = mod->core_size_rx;
92293 __set_bit(0, strmap);
92294- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
92295+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
92296
92297 return symoffs;
92298 }
92299@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
92300 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92301 mod->strtab = (void *)sechdrs[strindex].sh_addr;
92302
92303+ pax_open_kernel();
92304+
92305 /* Set types up while we still have access to sections. */
92306 for (i = 0; i < mod->num_symtab; i++)
92307 mod->symtab[i].st_info
92308- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
92309+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
92310
92311- mod->core_symtab = dst = mod->module_core + symoffs;
92312+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
92313 src = mod->symtab;
92314 *dst = *src;
92315 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
92316@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
92317 }
92318 mod->core_num_syms = ndst;
92319
92320- mod->core_strtab = s = mod->module_core + stroffs;
92321+ mod->core_strtab = s = mod->module_core_rx + stroffs;
92322 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
92323 if (test_bit(i, strmap))
92324 *++s = mod->strtab[i];
92325+
92326+ pax_close_kernel();
92327 }
92328 #else
92329 static inline unsigned long layout_symtab(struct module *mod,
92330@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
92331 #endif
92332 }
92333
92334-static void *module_alloc_update_bounds(unsigned long size)
92335+static void *module_alloc_update_bounds_rw(unsigned long size)
92336 {
92337 void *ret = module_alloc(size);
92338
92339 if (ret) {
92340 /* Update module bounds. */
92341- if ((unsigned long)ret < module_addr_min)
92342- module_addr_min = (unsigned long)ret;
92343- if ((unsigned long)ret + size > module_addr_max)
92344- module_addr_max = (unsigned long)ret + size;
92345+ if ((unsigned long)ret < module_addr_min_rw)
92346+ module_addr_min_rw = (unsigned long)ret;
92347+ if ((unsigned long)ret + size > module_addr_max_rw)
92348+ module_addr_max_rw = (unsigned long)ret + size;
92349+ }
92350+ return ret;
92351+}
92352+
92353+static void *module_alloc_update_bounds_rx(unsigned long size)
92354+{
92355+ void *ret = module_alloc_exec(size);
92356+
92357+ if (ret) {
92358+ /* Update module bounds. */
92359+ if ((unsigned long)ret < module_addr_min_rx)
92360+ module_addr_min_rx = (unsigned long)ret;
92361+ if ((unsigned long)ret + size > module_addr_max_rx)
92362+ module_addr_max_rx = (unsigned long)ret + size;
92363 }
92364 return ret;
92365 }
92366@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92367 unsigned int i;
92368
92369 /* only scan the sections containing data */
92370- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
92371- (unsigned long)mod->module_core,
92372+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
92373+ (unsigned long)mod->module_core_rw,
92374 sizeof(struct module), GFP_KERNEL);
92375
92376 for (i = 1; i < hdr->e_shnum; i++) {
92377@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92378 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
92379 continue;
92380
92381- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
92382- (unsigned long)mod->module_core,
92383+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
92384+ (unsigned long)mod->module_core_rw,
92385 sechdrs[i].sh_size, GFP_KERNEL);
92386 }
92387 }
92388@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
92389 Elf_Ehdr *hdr;
92390 Elf_Shdr *sechdrs;
92391 char *secstrings, *args, *modmagic, *strtab = NULL;
92392- char *staging;
92393+ char *staging, *license;
92394 unsigned int i;
92395 unsigned int symindex = 0;
92396 unsigned int strindex = 0;
92397@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
92398 goto free_hdr;
92399 }
92400
92401+ license = get_modinfo(sechdrs, infoindex, "license");
92402+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92403+ if (!license || !license_is_gpl_compatible(license)) {
92404+ err = -ENOEXEC;
92405+ goto free_hdr;
92406+ }
92407+#endif
92408+
92409 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
92410 /* This is allowed: modprobe --force will invalidate it. */
92411 if (!modmagic) {
92412@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
92413 secstrings, &stroffs, strmap);
92414
92415 /* Do the allocs. */
92416- ptr = module_alloc_update_bounds(mod->core_size);
92417+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92418 /*
92419 * The pointer to this block is stored in the module structure
92420 * which is inside the block. Just mark it as not being a
92421@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
92422 err = -ENOMEM;
92423 goto free_percpu;
92424 }
92425- memset(ptr, 0, mod->core_size);
92426- mod->module_core = ptr;
92427+ memset(ptr, 0, mod->core_size_rw);
92428+ mod->module_core_rw = ptr;
92429
92430- ptr = module_alloc_update_bounds(mod->init_size);
92431+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92432 /*
92433 * The pointer to this block is stored in the module structure
92434 * which is inside the block. This block doesn't need to be
92435 * scanned as it contains data and code that will be freed
92436 * after the module is initialized.
92437 */
92438- kmemleak_ignore(ptr);
92439- if (!ptr && mod->init_size) {
92440+ kmemleak_not_leak(ptr);
92441+ if (!ptr && mod->init_size_rw) {
92442 err = -ENOMEM;
92443- goto free_core;
92444+ goto free_core_rw;
92445 }
92446- memset(ptr, 0, mod->init_size);
92447- mod->module_init = ptr;
92448+ memset(ptr, 0, mod->init_size_rw);
92449+ mod->module_init_rw = ptr;
92450+
92451+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92452+ kmemleak_not_leak(ptr);
92453+ if (!ptr) {
92454+ err = -ENOMEM;
92455+ goto free_init_rw;
92456+ }
92457+
92458+ pax_open_kernel();
92459+ memset(ptr, 0, mod->core_size_rx);
92460+ pax_close_kernel();
92461+ mod->module_core_rx = ptr;
92462+
92463+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92464+ kmemleak_not_leak(ptr);
92465+ if (!ptr && mod->init_size_rx) {
92466+ err = -ENOMEM;
92467+ goto free_core_rx;
92468+ }
92469+
92470+ pax_open_kernel();
92471+ memset(ptr, 0, mod->init_size_rx);
92472+ pax_close_kernel();
92473+ mod->module_init_rx = ptr;
92474
92475 /* Transfer each section which specifies SHF_ALLOC */
92476 DEBUGP("final section addresses:\n");
92477@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
92478 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
92479 continue;
92480
92481- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
92482- dest = mod->module_init
92483- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92484- else
92485- dest = mod->module_core + sechdrs[i].sh_entsize;
92486+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
92487+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92488+ dest = mod->module_init_rw
92489+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92490+ else
92491+ dest = mod->module_init_rx
92492+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92493+ } else {
92494+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92495+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
92496+ else
92497+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
92498+ }
92499
92500- if (sechdrs[i].sh_type != SHT_NOBITS)
92501- memcpy(dest, (void *)sechdrs[i].sh_addr,
92502- sechdrs[i].sh_size);
92503+ if (sechdrs[i].sh_type != SHT_NOBITS) {
92504+
92505+#ifdef CONFIG_PAX_KERNEXEC
92506+#ifdef CONFIG_X86_64
92507+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
92508+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92509+#endif
92510+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
92511+ pax_open_kernel();
92512+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92513+ pax_close_kernel();
92514+ } else
92515+#endif
92516+
92517+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92518+ }
92519 /* Update sh_addr to point to copy in image. */
92520- sechdrs[i].sh_addr = (unsigned long)dest;
92521+
92522+#ifdef CONFIG_PAX_KERNEXEC
92523+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
92524+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
92525+ else
92526+#endif
92527+
92528+ sechdrs[i].sh_addr = (unsigned long)dest;
92529 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
92530 }
92531 /* Module has been moved. */
92532@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
92533 mod->name);
92534 if (!mod->refptr) {
92535 err = -ENOMEM;
92536- goto free_init;
92537+ goto free_init_rx;
92538 }
92539 #endif
92540 /* Now we've moved module, initialize linked lists, etc. */
92541@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
92542 goto free_unload;
92543
92544 /* Set up license info based on the info section */
92545- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
92546+ set_license(mod, license);
92547
92548 /*
92549 * ndiswrapper is under GPL by itself, but loads proprietary modules.
92550@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
92551 /* Set up MODINFO_ATTR fields */
92552 setup_modinfo(mod, sechdrs, infoindex);
92553
92554+ mod->args = args;
92555+
92556+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92557+ {
92558+ char *p, *p2;
92559+
92560+ if (strstr(mod->args, "grsec_modharden_netdev")) {
92561+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
92562+ err = -EPERM;
92563+ goto cleanup;
92564+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92565+ p += strlen("grsec_modharden_normal");
92566+ p2 = strstr(p, "_");
92567+ if (p2) {
92568+ *p2 = '\0';
92569+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92570+ *p2 = '_';
92571+ }
92572+ err = -EPERM;
92573+ goto cleanup;
92574+ }
92575+ }
92576+#endif
92577+
92578+
92579 /* Fix up syms, so that st_value is a pointer to location. */
92580 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
92581 mod);
92582@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
92583
92584 /* Now do relocations. */
92585 for (i = 1; i < hdr->e_shnum; i++) {
92586- const char *strtab = (char *)sechdrs[strindex].sh_addr;
92587 unsigned int info = sechdrs[i].sh_info;
92588+ strtab = (char *)sechdrs[strindex].sh_addr;
92589
92590 /* Not a valid relocation section? */
92591 if (info >= hdr->e_shnum)
92592@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
92593 * Do it before processing of module parameters, so the module
92594 * can provide parameter accessor functions of its own.
92595 */
92596- if (mod->module_init)
92597- flush_icache_range((unsigned long)mod->module_init,
92598- (unsigned long)mod->module_init
92599- + mod->init_size);
92600- flush_icache_range((unsigned long)mod->module_core,
92601- (unsigned long)mod->module_core + mod->core_size);
92602+ if (mod->module_init_rx)
92603+ flush_icache_range((unsigned long)mod->module_init_rx,
92604+ (unsigned long)mod->module_init_rx
92605+ + mod->init_size_rx);
92606+ flush_icache_range((unsigned long)mod->module_core_rx,
92607+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92608
92609 set_fs(old_fs);
92610
92611- mod->args = args;
92612 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
92613 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
92614 mod->name);
92615@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
92616 free_unload:
92617 module_unload_free(mod);
92618 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
92619+ free_init_rx:
92620 percpu_modfree(mod->refptr);
92621- free_init:
92622 #endif
92623- module_free(mod, mod->module_init);
92624- free_core:
92625- module_free(mod, mod->module_core);
92626+ module_free_exec(mod, mod->module_init_rx);
92627+ free_core_rx:
92628+ module_free_exec(mod, mod->module_core_rx);
92629+ free_init_rw:
92630+ module_free(mod, mod->module_init_rw);
92631+ free_core_rw:
92632+ module_free(mod, mod->module_core_rw);
92633 /* mod will be freed with core. Don't access it beyond this line! */
92634 free_percpu:
92635 if (percpu)
92636@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
92637 mod->symtab = mod->core_symtab;
92638 mod->strtab = mod->core_strtab;
92639 #endif
92640- module_free(mod, mod->module_init);
92641- mod->module_init = NULL;
92642- mod->init_size = 0;
92643- mod->init_text_size = 0;
92644+ module_free(mod, mod->module_init_rw);
92645+ module_free_exec(mod, mod->module_init_rx);
92646+ mod->module_init_rw = NULL;
92647+ mod->module_init_rx = NULL;
92648+ mod->init_size_rw = 0;
92649+ mod->init_size_rx = 0;
92650 mutex_unlock(&module_mutex);
92651
92652 return 0;
92653@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
92654 unsigned long nextval;
92655
92656 /* At worse, next value is at end of module */
92657- if (within_module_init(addr, mod))
92658- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92659+ if (within_module_init_rx(addr, mod))
92660+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92661+ else if (within_module_init_rw(addr, mod))
92662+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92663+ else if (within_module_core_rx(addr, mod))
92664+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92665+ else if (within_module_core_rw(addr, mod))
92666+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92667 else
92668- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92669+ return NULL;
92670
92671 /* Scan for closest preceeding symbol, and next symbol. (ELF
92672 starts real symbols at 1). */
92673@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
92674 char buf[8];
92675
92676 seq_printf(m, "%s %u",
92677- mod->name, mod->init_size + mod->core_size);
92678+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92679 print_unload_info(m, mod);
92680
92681 /* Informative for users. */
92682@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
92683 mod->state == MODULE_STATE_COMING ? "Loading":
92684 "Live");
92685 /* Used by oprofile and other similar tools. */
92686- seq_printf(m, " 0x%p", mod->module_core);
92687+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
92688
92689 /* Taints info */
92690 if (mod->taints)
92691@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
92692
92693 static int __init proc_modules_init(void)
92694 {
92695+#ifndef CONFIG_GRKERNSEC_HIDESYM
92696+#ifdef CONFIG_GRKERNSEC_PROC_USER
92697+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92698+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92699+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92700+#else
92701 proc_create("modules", 0, NULL, &proc_modules_operations);
92702+#endif
92703+#else
92704+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92705+#endif
92706 return 0;
92707 }
92708 module_init(proc_modules_init);
92709@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
92710 {
92711 struct module *mod;
92712
92713- if (addr < module_addr_min || addr > module_addr_max)
92714+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92715+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92716 return NULL;
92717
92718 list_for_each_entry_rcu(mod, &modules, list)
92719- if (within_module_core(addr, mod)
92720- || within_module_init(addr, mod))
92721+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
92722 return mod;
92723 return NULL;
92724 }
92725@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
92726 */
92727 struct module *__module_text_address(unsigned long addr)
92728 {
92729- struct module *mod = __module_address(addr);
92730+ struct module *mod;
92731+
92732+#ifdef CONFIG_X86_32
92733+ addr = ktla_ktva(addr);
92734+#endif
92735+
92736+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92737+ return NULL;
92738+
92739+ mod = __module_address(addr);
92740+
92741 if (mod) {
92742 /* Make sure it's within the text section. */
92743- if (!within(addr, mod->module_init, mod->init_text_size)
92744- && !within(addr, mod->module_core, mod->core_text_size))
92745+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92746 mod = NULL;
92747 }
92748 return mod;
92749diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
92750index ec815a9..fe46e99 100644
92751--- a/kernel/mutex-debug.c
92752+++ b/kernel/mutex-debug.c
92753@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
92754 }
92755
92756 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92757- struct thread_info *ti)
92758+ struct task_struct *task)
92759 {
92760 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
92761
92762 /* Mark the current thread as blocked on the lock: */
92763- ti->task->blocked_on = waiter;
92764+ task->blocked_on = waiter;
92765 }
92766
92767 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92768- struct thread_info *ti)
92769+ struct task_struct *task)
92770 {
92771 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
92772- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
92773- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
92774- ti->task->blocked_on = NULL;
92775+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
92776+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
92777+ task->blocked_on = NULL;
92778
92779 list_del_init(&waiter->list);
92780 waiter->task = NULL;
92781@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
92782 return;
92783
92784 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
92785- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
92786+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
92787 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
92788 mutex_clear_owner(lock);
92789 }
92790diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
92791index 6b2d735..372d3c4 100644
92792--- a/kernel/mutex-debug.h
92793+++ b/kernel/mutex-debug.h
92794@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92795 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92796 extern void debug_mutex_add_waiter(struct mutex *lock,
92797 struct mutex_waiter *waiter,
92798- struct thread_info *ti);
92799+ struct task_struct *task);
92800 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92801- struct thread_info *ti);
92802+ struct task_struct *task);
92803 extern void debug_mutex_unlock(struct mutex *lock);
92804 extern void debug_mutex_init(struct mutex *lock, const char *name,
92805 struct lock_class_key *key);
92806
92807 static inline void mutex_set_owner(struct mutex *lock)
92808 {
92809- lock->owner = current_thread_info();
92810+ lock->owner = current;
92811 }
92812
92813 static inline void mutex_clear_owner(struct mutex *lock)
92814diff --git a/kernel/mutex.c b/kernel/mutex.c
92815index f85644c..5ee9f77 100644
92816--- a/kernel/mutex.c
92817+++ b/kernel/mutex.c
92818@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92819 */
92820
92821 for (;;) {
92822- struct thread_info *owner;
92823+ struct task_struct *owner;
92824
92825 /*
92826 * If we own the BKL, then don't spin. The owner of
92827@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92828 spin_lock_mutex(&lock->wait_lock, flags);
92829
92830 debug_mutex_lock_common(lock, &waiter);
92831- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92832+ debug_mutex_add_waiter(lock, &waiter, task);
92833
92834 /* add waiting tasks to the end of the waitqueue (FIFO): */
92835 list_add_tail(&waiter.list, &lock->wait_list);
92836@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92837 * TASK_UNINTERRUPTIBLE case.)
92838 */
92839 if (unlikely(signal_pending_state(state, task))) {
92840- mutex_remove_waiter(lock, &waiter,
92841- task_thread_info(task));
92842+ mutex_remove_waiter(lock, &waiter, task);
92843 mutex_release(&lock->dep_map, 1, ip);
92844 spin_unlock_mutex(&lock->wait_lock, flags);
92845
92846@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92847 done:
92848 lock_acquired(&lock->dep_map, ip);
92849 /* got the lock - rejoice! */
92850- mutex_remove_waiter(lock, &waiter, current_thread_info());
92851+ mutex_remove_waiter(lock, &waiter, task);
92852 mutex_set_owner(lock);
92853
92854 /* set it to 0 if there are no waiters left: */
92855diff --git a/kernel/mutex.h b/kernel/mutex.h
92856index 67578ca..4115fbf 100644
92857--- a/kernel/mutex.h
92858+++ b/kernel/mutex.h
92859@@ -19,7 +19,7 @@
92860 #ifdef CONFIG_SMP
92861 static inline void mutex_set_owner(struct mutex *lock)
92862 {
92863- lock->owner = current_thread_info();
92864+ lock->owner = current;
92865 }
92866
92867 static inline void mutex_clear_owner(struct mutex *lock)
92868diff --git a/kernel/panic.c b/kernel/panic.c
92869index 96b45d0..ff70a46 100644
92870--- a/kernel/panic.c
92871+++ b/kernel/panic.c
92872@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
92873 va_end(args);
92874 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
92875 #ifdef CONFIG_DEBUG_BUGVERBOSE
92876- dump_stack();
92877+ /*
92878+ * Avoid nested stack-dumping if a panic occurs during oops processing
92879+ */
92880+ if (!oops_in_progress)
92881+ dump_stack();
92882 #endif
92883
92884 /*
92885@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
92886 const char *board;
92887
92888 printk(KERN_WARNING "------------[ cut here ]------------\n");
92889- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
92890+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
92891 board = dmi_get_system_info(DMI_PRODUCT_NAME);
92892 if (board)
92893 printk(KERN_WARNING "Hardware name: %s\n", board);
92894@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92895 */
92896 void __stack_chk_fail(void)
92897 {
92898- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92899+ dump_stack();
92900+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92901 __builtin_return_address(0));
92902 }
92903 EXPORT_SYMBOL(__stack_chk_fail);
92904diff --git a/kernel/params.c b/kernel/params.c
92905index d656c27..21e452c 100644
92906--- a/kernel/params.c
92907+++ b/kernel/params.c
92908@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
92909 return ret;
92910 }
92911
92912-static struct sysfs_ops module_sysfs_ops = {
92913+static const struct sysfs_ops module_sysfs_ops = {
92914 .show = module_attr_show,
92915 .store = module_attr_store,
92916 };
92917@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
92918 return 0;
92919 }
92920
92921-static struct kset_uevent_ops module_uevent_ops = {
92922+static const struct kset_uevent_ops module_uevent_ops = {
92923 .filter = uevent_filter,
92924 };
92925
92926diff --git a/kernel/perf_event.c b/kernel/perf_event.c
92927index 37ebc14..9c121d9 100644
92928--- a/kernel/perf_event.c
92929+++ b/kernel/perf_event.c
92930@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
92931 */
92932 int sysctl_perf_event_sample_rate __read_mostly = 100000;
92933
92934-static atomic64_t perf_event_id;
92935+static atomic64_unchecked_t perf_event_id;
92936
92937 /*
92938 * Lock for (sysadmin-configurable) event reservations:
92939@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
92940 * In order to keep per-task stats reliable we need to flip the event
92941 * values when we flip the contexts.
92942 */
92943- value = atomic64_read(&next_event->count);
92944- value = atomic64_xchg(&event->count, value);
92945- atomic64_set(&next_event->count, value);
92946+ value = atomic64_read_unchecked(&next_event->count);
92947+ value = atomic64_xchg_unchecked(&event->count, value);
92948+ atomic64_set_unchecked(&next_event->count, value);
92949
92950 swap(event->total_time_enabled, next_event->total_time_enabled);
92951 swap(event->total_time_running, next_event->total_time_running);
92952@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
92953 update_event_times(event);
92954 }
92955
92956- return atomic64_read(&event->count);
92957+ return atomic64_read_unchecked(&event->count);
92958 }
92959
92960 /*
92961@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
92962 values[n++] = 1 + leader->nr_siblings;
92963 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92964 values[n++] = leader->total_time_enabled +
92965- atomic64_read(&leader->child_total_time_enabled);
92966+ atomic64_read_unchecked(&leader->child_total_time_enabled);
92967 }
92968 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92969 values[n++] = leader->total_time_running +
92970- atomic64_read(&leader->child_total_time_running);
92971+ atomic64_read_unchecked(&leader->child_total_time_running);
92972 }
92973
92974 size = n * sizeof(u64);
92975@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
92976 values[n++] = perf_event_read_value(event);
92977 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92978 values[n++] = event->total_time_enabled +
92979- atomic64_read(&event->child_total_time_enabled);
92980+ atomic64_read_unchecked(&event->child_total_time_enabled);
92981 }
92982 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92983 values[n++] = event->total_time_running +
92984- atomic64_read(&event->child_total_time_running);
92985+ atomic64_read_unchecked(&event->child_total_time_running);
92986 }
92987 if (read_format & PERF_FORMAT_ID)
92988 values[n++] = primary_event_id(event);
92989@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
92990 static void perf_event_reset(struct perf_event *event)
92991 {
92992 (void)perf_event_read(event);
92993- atomic64_set(&event->count, 0);
92994+ atomic64_set_unchecked(&event->count, 0);
92995 perf_event_update_userpage(event);
92996 }
92997
92998@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
92999 ++userpg->lock;
93000 barrier();
93001 userpg->index = perf_event_index(event);
93002- userpg->offset = atomic64_read(&event->count);
93003+ userpg->offset = atomic64_read_unchecked(&event->count);
93004 if (event->state == PERF_EVENT_STATE_ACTIVE)
93005- userpg->offset -= atomic64_read(&event->hw.prev_count);
93006+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
93007
93008 userpg->time_enabled = event->total_time_enabled +
93009- atomic64_read(&event->child_total_time_enabled);
93010+ atomic64_read_unchecked(&event->child_total_time_enabled);
93011
93012 userpg->time_running = event->total_time_running +
93013- atomic64_read(&event->child_total_time_running);
93014+ atomic64_read_unchecked(&event->child_total_time_running);
93015
93016 barrier();
93017 ++userpg->lock;
93018@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
93019 u64 values[4];
93020 int n = 0;
93021
93022- values[n++] = atomic64_read(&event->count);
93023+ values[n++] = atomic64_read_unchecked(&event->count);
93024 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
93025 values[n++] = event->total_time_enabled +
93026- atomic64_read(&event->child_total_time_enabled);
93027+ atomic64_read_unchecked(&event->child_total_time_enabled);
93028 }
93029 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
93030 values[n++] = event->total_time_running +
93031- atomic64_read(&event->child_total_time_running);
93032+ atomic64_read_unchecked(&event->child_total_time_running);
93033 }
93034 if (read_format & PERF_FORMAT_ID)
93035 values[n++] = primary_event_id(event);
93036@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
93037 if (leader != event)
93038 leader->pmu->read(leader);
93039
93040- values[n++] = atomic64_read(&leader->count);
93041+ values[n++] = atomic64_read_unchecked(&leader->count);
93042 if (read_format & PERF_FORMAT_ID)
93043 values[n++] = primary_event_id(leader);
93044
93045@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
93046 if (sub != event)
93047 sub->pmu->read(sub);
93048
93049- values[n++] = atomic64_read(&sub->count);
93050+ values[n++] = atomic64_read_unchecked(&sub->count);
93051 if (read_format & PERF_FORMAT_ID)
93052 values[n++] = primary_event_id(sub);
93053
93054@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
93055 * need to add enough zero bytes after the string to handle
93056 * the 64bit alignment we do later.
93057 */
93058- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
93059+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
93060 if (!buf) {
93061 name = strncpy(tmp, "//enomem", sizeof(tmp));
93062 goto got_name;
93063 }
93064- name = d_path(&file->f_path, buf, PATH_MAX);
93065+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
93066 if (IS_ERR(name)) {
93067 name = strncpy(tmp, "//toolong", sizeof(tmp));
93068 goto got_name;
93069@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
93070 {
93071 struct hw_perf_event *hwc = &event->hw;
93072
93073- atomic64_add(nr, &event->count);
93074+ atomic64_add_unchecked(nr, &event->count);
93075
93076 if (!hwc->sample_period)
93077 return;
93078@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
93079 u64 now;
93080
93081 now = cpu_clock(cpu);
93082- prev = atomic64_read(&event->hw.prev_count);
93083- atomic64_set(&event->hw.prev_count, now);
93084- atomic64_add(now - prev, &event->count);
93085+ prev = atomic64_read_unchecked(&event->hw.prev_count);
93086+ atomic64_set_unchecked(&event->hw.prev_count, now);
93087+ atomic64_add_unchecked(now - prev, &event->count);
93088 }
93089
93090 static int cpu_clock_perf_event_enable(struct perf_event *event)
93091@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
93092 struct hw_perf_event *hwc = &event->hw;
93093 int cpu = raw_smp_processor_id();
93094
93095- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
93096+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
93097 perf_swevent_start_hrtimer(event);
93098
93099 return 0;
93100@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
93101 u64 prev;
93102 s64 delta;
93103
93104- prev = atomic64_xchg(&event->hw.prev_count, now);
93105+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
93106 delta = now - prev;
93107- atomic64_add(delta, &event->count);
93108+ atomic64_add_unchecked(delta, &event->count);
93109 }
93110
93111 static int task_clock_perf_event_enable(struct perf_event *event)
93112@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
93113
93114 now = event->ctx->time;
93115
93116- atomic64_set(&hwc->prev_count, now);
93117+ atomic64_set_unchecked(&hwc->prev_count, now);
93118
93119 perf_swevent_start_hrtimer(event);
93120
93121@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
93122 event->parent = parent_event;
93123
93124 event->ns = get_pid_ns(current->nsproxy->pid_ns);
93125- event->id = atomic64_inc_return(&perf_event_id);
93126+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
93127
93128 event->state = PERF_EVENT_STATE_INACTIVE;
93129
93130@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
93131 if (child_event->attr.inherit_stat)
93132 perf_event_read_event(child_event, child);
93133
93134- child_val = atomic64_read(&child_event->count);
93135+ child_val = atomic64_read_unchecked(&child_event->count);
93136
93137 /*
93138 * Add back the child's count to the parent's count:
93139 */
93140- atomic64_add(child_val, &parent_event->count);
93141- atomic64_add(child_event->total_time_enabled,
93142+ atomic64_add_unchecked(child_val, &parent_event->count);
93143+ atomic64_add_unchecked(child_event->total_time_enabled,
93144 &parent_event->child_total_time_enabled);
93145- atomic64_add(child_event->total_time_running,
93146+ atomic64_add_unchecked(child_event->total_time_running,
93147 &parent_event->child_total_time_running);
93148
93149 /*
93150diff --git a/kernel/pid.c b/kernel/pid.c
93151index fce7198..4f23a7e 100644
93152--- a/kernel/pid.c
93153+++ b/kernel/pid.c
93154@@ -33,6 +33,7 @@
93155 #include <linux/rculist.h>
93156 #include <linux/bootmem.h>
93157 #include <linux/hash.h>
93158+#include <linux/security.h>
93159 #include <linux/pid_namespace.h>
93160 #include <linux/init_task.h>
93161 #include <linux/syscalls.h>
93162@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
93163
93164 int pid_max = PID_MAX_DEFAULT;
93165
93166-#define RESERVED_PIDS 300
93167+#define RESERVED_PIDS 500
93168
93169 int pid_max_min = RESERVED_PIDS + 1;
93170 int pid_max_max = PID_MAX_LIMIT;
93171@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
93172 */
93173 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
93174 {
93175- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93176+ struct task_struct *task;
93177+
93178+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93179+
93180+ if (gr_pid_is_chrooted(task))
93181+ return NULL;
93182+
93183+ return task;
93184 }
93185
93186 struct task_struct *find_task_by_vpid(pid_t vnr)
93187@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93188 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
93189 }
93190
93191+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93192+{
93193+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
93194+}
93195+
93196 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93197 {
93198 struct pid *pid;
93199diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
93200index 5c9dc22..d271117 100644
93201--- a/kernel/posix-cpu-timers.c
93202+++ b/kernel/posix-cpu-timers.c
93203@@ -6,6 +6,7 @@
93204 #include <linux/posix-timers.h>
93205 #include <linux/errno.h>
93206 #include <linux/math64.h>
93207+#include <linux/security.h>
93208 #include <asm/uaccess.h>
93209 #include <linux/kernel_stat.h>
93210 #include <trace/events/timer.h>
93211@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
93212
93213 static __init int init_posix_cpu_timers(void)
93214 {
93215- struct k_clock process = {
93216+ static struct k_clock process = {
93217 .clock_getres = process_cpu_clock_getres,
93218 .clock_get = process_cpu_clock_get,
93219 .clock_set = do_posix_clock_nosettime,
93220@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
93221 .nsleep = process_cpu_nsleep,
93222 .nsleep_restart = process_cpu_nsleep_restart,
93223 };
93224- struct k_clock thread = {
93225+ static struct k_clock thread = {
93226 .clock_getres = thread_cpu_clock_getres,
93227 .clock_get = thread_cpu_clock_get,
93228 .clock_set = do_posix_clock_nosettime,
93229diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93230index 5e76d22..cf1baeb 100644
93231--- a/kernel/posix-timers.c
93232+++ b/kernel/posix-timers.c
93233@@ -42,6 +42,7 @@
93234 #include <linux/compiler.h>
93235 #include <linux/idr.h>
93236 #include <linux/posix-timers.h>
93237+#include <linux/grsecurity.h>
93238 #include <linux/syscalls.h>
93239 #include <linux/wait.h>
93240 #include <linux/workqueue.h>
93241@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
93242 * which we beg off on and pass to do_sys_settimeofday().
93243 */
93244
93245-static struct k_clock posix_clocks[MAX_CLOCKS];
93246+static struct k_clock *posix_clocks[MAX_CLOCKS];
93247
93248 /*
93249 * These ones are defined below.
93250@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
93251 */
93252 #define CLOCK_DISPATCH(clock, call, arglist) \
93253 ((clock) < 0 ? posix_cpu_##call arglist : \
93254- (posix_clocks[clock].call != NULL \
93255- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
93256+ (posix_clocks[clock]->call != NULL \
93257+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
93258
93259 /*
93260 * Default clock hook functions when the struct k_clock passed
93261@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
93262 struct timespec *tp)
93263 {
93264 tp->tv_sec = 0;
93265- tp->tv_nsec = posix_clocks[which_clock].res;
93266+ tp->tv_nsec = posix_clocks[which_clock]->res;
93267 return 0;
93268 }
93269
93270@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
93271 return 0;
93272 if ((unsigned) which_clock >= MAX_CLOCKS)
93273 return 1;
93274- if (posix_clocks[which_clock].clock_getres != NULL)
93275+ if (posix_clocks[which_clock] == NULL)
93276 return 0;
93277- if (posix_clocks[which_clock].res != 0)
93278+ if (posix_clocks[which_clock]->clock_getres != NULL)
93279+ return 0;
93280+ if (posix_clocks[which_clock]->res != 0)
93281 return 0;
93282 return 1;
93283 }
93284@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
93285 */
93286 static __init int init_posix_timers(void)
93287 {
93288- struct k_clock clock_realtime = {
93289+ static struct k_clock clock_realtime = {
93290 .clock_getres = hrtimer_get_res,
93291 };
93292- struct k_clock clock_monotonic = {
93293+ static struct k_clock clock_monotonic = {
93294 .clock_getres = hrtimer_get_res,
93295 .clock_get = posix_ktime_get_ts,
93296 .clock_set = do_posix_clock_nosettime,
93297 };
93298- struct k_clock clock_monotonic_raw = {
93299+ static struct k_clock clock_monotonic_raw = {
93300 .clock_getres = hrtimer_get_res,
93301 .clock_get = posix_get_monotonic_raw,
93302 .clock_set = do_posix_clock_nosettime,
93303 .timer_create = no_timer_create,
93304 .nsleep = no_nsleep,
93305 };
93306- struct k_clock clock_realtime_coarse = {
93307+ static struct k_clock clock_realtime_coarse = {
93308 .clock_getres = posix_get_coarse_res,
93309 .clock_get = posix_get_realtime_coarse,
93310 .clock_set = do_posix_clock_nosettime,
93311 .timer_create = no_timer_create,
93312 .nsleep = no_nsleep,
93313 };
93314- struct k_clock clock_monotonic_coarse = {
93315+ static struct k_clock clock_monotonic_coarse = {
93316 .clock_getres = posix_get_coarse_res,
93317 .clock_get = posix_get_monotonic_coarse,
93318 .clock_set = do_posix_clock_nosettime,
93319@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
93320 .nsleep = no_nsleep,
93321 };
93322
93323+ pax_track_stack();
93324+
93325 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
93326 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
93327 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
93328@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
93329 return;
93330 }
93331
93332- posix_clocks[clock_id] = *new_clock;
93333+ posix_clocks[clock_id] = new_clock;
93334 }
93335 EXPORT_SYMBOL_GPL(register_posix_clock);
93336
93337@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93338 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93339 return -EFAULT;
93340
93341+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93342+ have their clock_set fptr set to a nosettime dummy function
93343+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93344+ call common_clock_set, which calls do_sys_settimeofday, which
93345+ we hook
93346+ */
93347+
93348 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
93349 }
93350
93351diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
93352index 04a9e90..bc355aa 100644
93353--- a/kernel/power/hibernate.c
93354+++ b/kernel/power/hibernate.c
93355@@ -48,14 +48,14 @@ enum {
93356
93357 static int hibernation_mode = HIBERNATION_SHUTDOWN;
93358
93359-static struct platform_hibernation_ops *hibernation_ops;
93360+static const struct platform_hibernation_ops *hibernation_ops;
93361
93362 /**
93363 * hibernation_set_ops - set the global hibernate operations
93364 * @ops: the hibernation operations to use in subsequent hibernation transitions
93365 */
93366
93367-void hibernation_set_ops(struct platform_hibernation_ops *ops)
93368+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
93369 {
93370 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
93371 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
93372diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
93373index e8b3370..484c2e4 100644
93374--- a/kernel/power/poweroff.c
93375+++ b/kernel/power/poweroff.c
93376@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
93377 .enable_mask = SYSRQ_ENABLE_BOOT,
93378 };
93379
93380-static int pm_sysrq_init(void)
93381+static int __init pm_sysrq_init(void)
93382 {
93383 register_sysrq_key('o', &sysrq_poweroff_op);
93384 return 0;
93385diff --git a/kernel/power/process.c b/kernel/power/process.c
93386index e7cd671..56d5f459 100644
93387--- a/kernel/power/process.c
93388+++ b/kernel/power/process.c
93389@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
93390 struct timeval start, end;
93391 u64 elapsed_csecs64;
93392 unsigned int elapsed_csecs;
93393+ bool timedout = false;
93394
93395 do_gettimeofday(&start);
93396
93397 end_time = jiffies + TIMEOUT;
93398 do {
93399 todo = 0;
93400+ if (time_after(jiffies, end_time))
93401+ timedout = true;
93402 read_lock(&tasklist_lock);
93403 do_each_thread(g, p) {
93404 if (frozen(p) || !freezeable(p))
93405@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
93406 * It is "frozen enough". If the task does wake
93407 * up, it will immediately call try_to_freeze.
93408 */
93409- if (!task_is_stopped_or_traced(p) &&
93410- !freezer_should_skip(p))
93411+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
93412 todo++;
93413+ if (timedout) {
93414+ printk(KERN_ERR "Task refusing to freeze:\n");
93415+ sched_show_task(p);
93416+ }
93417+ }
93418 } while_each_thread(g, p);
93419 read_unlock(&tasklist_lock);
93420 yield(); /* Yield is okay here */
93421- if (time_after(jiffies, end_time))
93422- break;
93423- } while (todo);
93424+ } while (todo && !timedout);
93425
93426 do_gettimeofday(&end);
93427 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
93428diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
93429index 40dd021..fb30ceb 100644
93430--- a/kernel/power/suspend.c
93431+++ b/kernel/power/suspend.c
93432@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
93433 [PM_SUSPEND_MEM] = "mem",
93434 };
93435
93436-static struct platform_suspend_ops *suspend_ops;
93437+static const struct platform_suspend_ops *suspend_ops;
93438
93439 /**
93440 * suspend_set_ops - Set the global suspend method table.
93441 * @ops: Pointer to ops structure.
93442 */
93443-void suspend_set_ops(struct platform_suspend_ops *ops)
93444+void suspend_set_ops(const struct platform_suspend_ops *ops)
93445 {
93446 mutex_lock(&pm_mutex);
93447 suspend_ops = ops;
93448diff --git a/kernel/printk.c b/kernel/printk.c
93449index 4cade47..4d17900 100644
93450--- a/kernel/printk.c
93451+++ b/kernel/printk.c
93452@@ -33,6 +33,7 @@
93453 #include <linux/bootmem.h>
93454 #include <linux/syscalls.h>
93455 #include <linux/kexec.h>
93456+#include <linux/syslog.h>
93457
93458 #include <asm/uaccess.h>
93459
93460@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
93461 }
93462 #endif
93463
93464-/*
93465- * Commands to do_syslog:
93466- *
93467- * 0 -- Close the log. Currently a NOP.
93468- * 1 -- Open the log. Currently a NOP.
93469- * 2 -- Read from the log.
93470- * 3 -- Read all messages remaining in the ring buffer.
93471- * 4 -- Read and clear all messages remaining in the ring buffer
93472- * 5 -- Clear ring buffer.
93473- * 6 -- Disable printk's to console
93474- * 7 -- Enable printk's to console
93475- * 8 -- Set level of messages printed to console
93476- * 9 -- Return number of unread characters in the log buffer
93477- * 10 -- Return size of the log buffer
93478- */
93479-int do_syslog(int type, char __user *buf, int len)
93480+int do_syslog(int type, char __user *buf, int len, bool from_file)
93481 {
93482 unsigned i, j, limit, count;
93483 int do_clear = 0;
93484 char c;
93485 int error = 0;
93486
93487- error = security_syslog(type);
93488+#ifdef CONFIG_GRKERNSEC_DMESG
93489+ if (grsec_enable_dmesg &&
93490+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
93491+ !capable(CAP_SYS_ADMIN))
93492+ return -EPERM;
93493+#endif
93494+
93495+ error = security_syslog(type, from_file);
93496 if (error)
93497 return error;
93498
93499 switch (type) {
93500- case 0: /* Close log */
93501+ case SYSLOG_ACTION_CLOSE: /* Close log */
93502 break;
93503- case 1: /* Open log */
93504+ case SYSLOG_ACTION_OPEN: /* Open log */
93505 break;
93506- case 2: /* Read from log */
93507+ case SYSLOG_ACTION_READ: /* Read from log */
93508 error = -EINVAL;
93509 if (!buf || len < 0)
93510 goto out;
93511@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
93512 if (!error)
93513 error = i;
93514 break;
93515- case 4: /* Read/clear last kernel messages */
93516+ /* Read/clear last kernel messages */
93517+ case SYSLOG_ACTION_READ_CLEAR:
93518 do_clear = 1;
93519 /* FALL THRU */
93520- case 3: /* Read last kernel messages */
93521+ /* Read last kernel messages */
93522+ case SYSLOG_ACTION_READ_ALL:
93523 error = -EINVAL;
93524 if (!buf || len < 0)
93525 goto out;
93526@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
93527 }
93528 }
93529 break;
93530- case 5: /* Clear ring buffer */
93531+ /* Clear ring buffer */
93532+ case SYSLOG_ACTION_CLEAR:
93533 logged_chars = 0;
93534 break;
93535- case 6: /* Disable logging to console */
93536+ /* Disable logging to console */
93537+ case SYSLOG_ACTION_CONSOLE_OFF:
93538 if (saved_console_loglevel == -1)
93539 saved_console_loglevel = console_loglevel;
93540 console_loglevel = minimum_console_loglevel;
93541 break;
93542- case 7: /* Enable logging to console */
93543+ /* Enable logging to console */
93544+ case SYSLOG_ACTION_CONSOLE_ON:
93545 if (saved_console_loglevel != -1) {
93546 console_loglevel = saved_console_loglevel;
93547 saved_console_loglevel = -1;
93548 }
93549 break;
93550- case 8: /* Set level of messages printed to console */
93551+ /* Set level of messages printed to console */
93552+ case SYSLOG_ACTION_CONSOLE_LEVEL:
93553 error = -EINVAL;
93554 if (len < 1 || len > 8)
93555 goto out;
93556@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
93557 saved_console_loglevel = -1;
93558 error = 0;
93559 break;
93560- case 9: /* Number of chars in the log buffer */
93561+ /* Number of chars in the log buffer */
93562+ case SYSLOG_ACTION_SIZE_UNREAD:
93563 error = log_end - log_start;
93564 break;
93565- case 10: /* Size of the log buffer */
93566+ /* Size of the log buffer */
93567+ case SYSLOG_ACTION_SIZE_BUFFER:
93568 error = log_buf_len;
93569 break;
93570 default:
93571@@ -415,7 +416,7 @@ out:
93572
93573 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
93574 {
93575- return do_syslog(type, buf, len);
93576+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
93577 }
93578
93579 /*
93580diff --git a/kernel/profile.c b/kernel/profile.c
93581index dfadc5b..7f59404 100644
93582--- a/kernel/profile.c
93583+++ b/kernel/profile.c
93584@@ -39,7 +39,7 @@ struct profile_hit {
93585 /* Oprofile timer tick hook */
93586 static int (*timer_hook)(struct pt_regs *) __read_mostly;
93587
93588-static atomic_t *prof_buffer;
93589+static atomic_unchecked_t *prof_buffer;
93590 static unsigned long prof_len, prof_shift;
93591
93592 int prof_on __read_mostly;
93593@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
93594 hits[i].pc = 0;
93595 continue;
93596 }
93597- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93598+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93599 hits[i].hits = hits[i].pc = 0;
93600 }
93601 }
93602@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93603 * Add the current hit(s) and flush the write-queue out
93604 * to the global buffer:
93605 */
93606- atomic_add(nr_hits, &prof_buffer[pc]);
93607+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93608 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93609- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93610+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93611 hits[i].pc = hits[i].hits = 0;
93612 }
93613 out:
93614@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93615 if (prof_on != type || !prof_buffer)
93616 return;
93617 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93618- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93619+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93620 }
93621 #endif /* !CONFIG_SMP */
93622 EXPORT_SYMBOL_GPL(profile_hits);
93623@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93624 return -EFAULT;
93625 buf++; p++; count--; read++;
93626 }
93627- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93628+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93629 if (copy_to_user(buf, (void *)pnt, count))
93630 return -EFAULT;
93631 read += count;
93632@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93633 }
93634 #endif
93635 profile_discard_flip_buffers();
93636- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93637+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93638 return count;
93639 }
93640
93641diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93642index 05625f6..733bf70 100644
93643--- a/kernel/ptrace.c
93644+++ b/kernel/ptrace.c
93645@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
93646 return ret;
93647 }
93648
93649-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93650+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
93651+ unsigned int log)
93652 {
93653 const struct cred *cred = current_cred(), *tcred;
93654
93655@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93656 cred->gid != tcred->egid ||
93657 cred->gid != tcred->sgid ||
93658 cred->gid != tcred->gid) &&
93659- !capable(CAP_SYS_PTRACE)) {
93660+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93661+ (log && !capable(CAP_SYS_PTRACE)))
93662+ ) {
93663 rcu_read_unlock();
93664 return -EPERM;
93665 }
93666@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93667 smp_rmb();
93668 if (task->mm)
93669 dumpable = get_dumpable(task->mm);
93670- if (!dumpable && !capable(CAP_SYS_PTRACE))
93671+ if (!dumpable &&
93672+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93673+ (log && !capable(CAP_SYS_PTRACE))))
93674 return -EPERM;
93675
93676 return security_ptrace_access_check(task, mode);
93677@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
93678 {
93679 int err;
93680 task_lock(task);
93681- err = __ptrace_may_access(task, mode);
93682+ err = __ptrace_may_access(task, mode, 0);
93683+ task_unlock(task);
93684+ return !err;
93685+}
93686+
93687+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
93688+{
93689+ int err;
93690+ task_lock(task);
93691+ err = __ptrace_may_access(task, mode, 1);
93692 task_unlock(task);
93693 return !err;
93694 }
93695@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
93696 goto out;
93697
93698 task_lock(task);
93699- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
93700+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
93701 task_unlock(task);
93702 if (retval)
93703 goto unlock_creds;
93704@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
93705 goto unlock_tasklist;
93706
93707 task->ptrace = PT_PTRACED;
93708- if (capable(CAP_SYS_PTRACE))
93709+ if (capable_nolog(CAP_SYS_PTRACE))
93710 task->ptrace |= PT_PTRACE_CAP;
93711
93712 __ptrace_link(task, current);
93713@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93714 {
93715 int copied = 0;
93716
93717+ pax_track_stack();
93718+
93719 while (len > 0) {
93720 char buf[128];
93721 int this_len, retval;
93722@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
93723 {
93724 int copied = 0;
93725
93726+ pax_track_stack();
93727+
93728 while (len > 0) {
93729 char buf[128];
93730 int this_len, retval;
93731@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
93732 int ret = -EIO;
93733 siginfo_t siginfo;
93734
93735+ pax_track_stack();
93736+
93737 switch (request) {
93738 case PTRACE_PEEKTEXT:
93739 case PTRACE_PEEKDATA:
93740@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
93741 ret = ptrace_setoptions(child, data);
93742 break;
93743 case PTRACE_GETEVENTMSG:
93744- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
93745+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
93746 break;
93747
93748 case PTRACE_GETSIGINFO:
93749 ret = ptrace_getsiginfo(child, &siginfo);
93750 if (!ret)
93751- ret = copy_siginfo_to_user((siginfo_t __user *) data,
93752+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
93753 &siginfo);
93754 break;
93755
93756 case PTRACE_SETSIGINFO:
93757- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
93758+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
93759 sizeof siginfo))
93760 ret = -EFAULT;
93761 else
93762@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
93763 goto out;
93764 }
93765
93766+ if (gr_handle_ptrace(child, request)) {
93767+ ret = -EPERM;
93768+ goto out_put_task_struct;
93769+ }
93770+
93771 if (request == PTRACE_ATTACH) {
93772 ret = ptrace_attach(child);
93773 /*
93774 * Some architectures need to do book-keeping after
93775 * a ptrace attach.
93776 */
93777- if (!ret)
93778+ if (!ret) {
93779 arch_ptrace_attach(child);
93780+ gr_audit_ptrace(child);
93781+ }
93782 goto out_put_task_struct;
93783 }
93784
93785@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
93786 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93787 if (copied != sizeof(tmp))
93788 return -EIO;
93789- return put_user(tmp, (unsigned long __user *)data);
93790+ return put_user(tmp, (__force unsigned long __user *)data);
93791 }
93792
93793 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
93794@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93795 siginfo_t siginfo;
93796 int ret;
93797
93798+ pax_track_stack();
93799+
93800 switch (request) {
93801 case PTRACE_PEEKTEXT:
93802 case PTRACE_PEEKDATA:
93803@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
93804 goto out;
93805 }
93806
93807+ if (gr_handle_ptrace(child, request)) {
93808+ ret = -EPERM;
93809+ goto out_put_task_struct;
93810+ }
93811+
93812 if (request == PTRACE_ATTACH) {
93813 ret = ptrace_attach(child);
93814 /*
93815 * Some architectures need to do book-keeping after
93816 * a ptrace attach.
93817 */
93818- if (!ret)
93819+ if (!ret) {
93820 arch_ptrace_attach(child);
93821+ gr_audit_ptrace(child);
93822+ }
93823 goto out_put_task_struct;
93824 }
93825
93826diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
93827index 697c0a0..2402696 100644
93828--- a/kernel/rcutorture.c
93829+++ b/kernel/rcutorture.c
93830@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
93831 { 0 };
93832 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
93833 { 0 };
93834-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93835-static atomic_t n_rcu_torture_alloc;
93836-static atomic_t n_rcu_torture_alloc_fail;
93837-static atomic_t n_rcu_torture_free;
93838-static atomic_t n_rcu_torture_mberror;
93839-static atomic_t n_rcu_torture_error;
93840+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93841+static atomic_unchecked_t n_rcu_torture_alloc;
93842+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93843+static atomic_unchecked_t n_rcu_torture_free;
93844+static atomic_unchecked_t n_rcu_torture_mberror;
93845+static atomic_unchecked_t n_rcu_torture_error;
93846 static long n_rcu_torture_timers;
93847 static struct list_head rcu_torture_removed;
93848 static cpumask_var_t shuffle_tmp_mask;
93849@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
93850
93851 spin_lock_bh(&rcu_torture_lock);
93852 if (list_empty(&rcu_torture_freelist)) {
93853- atomic_inc(&n_rcu_torture_alloc_fail);
93854+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93855 spin_unlock_bh(&rcu_torture_lock);
93856 return NULL;
93857 }
93858- atomic_inc(&n_rcu_torture_alloc);
93859+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93860 p = rcu_torture_freelist.next;
93861 list_del_init(p);
93862 spin_unlock_bh(&rcu_torture_lock);
93863@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
93864 static void
93865 rcu_torture_free(struct rcu_torture *p)
93866 {
93867- atomic_inc(&n_rcu_torture_free);
93868+ atomic_inc_unchecked(&n_rcu_torture_free);
93869 spin_lock_bh(&rcu_torture_lock);
93870 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93871 spin_unlock_bh(&rcu_torture_lock);
93872@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
93873 i = rp->rtort_pipe_count;
93874 if (i > RCU_TORTURE_PIPE_LEN)
93875 i = RCU_TORTURE_PIPE_LEN;
93876- atomic_inc(&rcu_torture_wcount[i]);
93877+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93878 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93879 rp->rtort_mbtest = 0;
93880 rcu_torture_free(rp);
93881@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
93882 i = rp->rtort_pipe_count;
93883 if (i > RCU_TORTURE_PIPE_LEN)
93884 i = RCU_TORTURE_PIPE_LEN;
93885- atomic_inc(&rcu_torture_wcount[i]);
93886+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93887 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93888 rp->rtort_mbtest = 0;
93889 list_del(&rp->rtort_free);
93890@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
93891 i = old_rp->rtort_pipe_count;
93892 if (i > RCU_TORTURE_PIPE_LEN)
93893 i = RCU_TORTURE_PIPE_LEN;
93894- atomic_inc(&rcu_torture_wcount[i]);
93895+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93896 old_rp->rtort_pipe_count++;
93897 cur_ops->deferred_free(old_rp);
93898 }
93899@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
93900 return;
93901 }
93902 if (p->rtort_mbtest == 0)
93903- atomic_inc(&n_rcu_torture_mberror);
93904+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93905 spin_lock(&rand_lock);
93906 cur_ops->read_delay(&rand);
93907 n_rcu_torture_timers++;
93908@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
93909 continue;
93910 }
93911 if (p->rtort_mbtest == 0)
93912- atomic_inc(&n_rcu_torture_mberror);
93913+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93914 cur_ops->read_delay(&rand);
93915 preempt_disable();
93916 pipe_count = p->rtort_pipe_count;
93917@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
93918 rcu_torture_current,
93919 rcu_torture_current_version,
93920 list_empty(&rcu_torture_freelist),
93921- atomic_read(&n_rcu_torture_alloc),
93922- atomic_read(&n_rcu_torture_alloc_fail),
93923- atomic_read(&n_rcu_torture_free),
93924- atomic_read(&n_rcu_torture_mberror),
93925+ atomic_read_unchecked(&n_rcu_torture_alloc),
93926+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93927+ atomic_read_unchecked(&n_rcu_torture_free),
93928+ atomic_read_unchecked(&n_rcu_torture_mberror),
93929 n_rcu_torture_timers);
93930- if (atomic_read(&n_rcu_torture_mberror) != 0)
93931+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
93932 cnt += sprintf(&page[cnt], " !!!");
93933 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
93934 if (i > 1) {
93935 cnt += sprintf(&page[cnt], "!!! ");
93936- atomic_inc(&n_rcu_torture_error);
93937+ atomic_inc_unchecked(&n_rcu_torture_error);
93938 WARN_ON_ONCE(1);
93939 }
93940 cnt += sprintf(&page[cnt], "Reader Pipe: ");
93941@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
93942 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
93943 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93944 cnt += sprintf(&page[cnt], " %d",
93945- atomic_read(&rcu_torture_wcount[i]));
93946+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93947 }
93948 cnt += sprintf(&page[cnt], "\n");
93949 if (cur_ops->stats)
93950@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
93951
93952 if (cur_ops->cleanup)
93953 cur_ops->cleanup();
93954- if (atomic_read(&n_rcu_torture_error))
93955+ if (atomic_read_unchecked(&n_rcu_torture_error))
93956 rcu_torture_print_module_parms("End of test: FAILURE");
93957 else
93958 rcu_torture_print_module_parms("End of test: SUCCESS");
93959@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
93960
93961 rcu_torture_current = NULL;
93962 rcu_torture_current_version = 0;
93963- atomic_set(&n_rcu_torture_alloc, 0);
93964- atomic_set(&n_rcu_torture_alloc_fail, 0);
93965- atomic_set(&n_rcu_torture_free, 0);
93966- atomic_set(&n_rcu_torture_mberror, 0);
93967- atomic_set(&n_rcu_torture_error, 0);
93968+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93969+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93970+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93971+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93972+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93973 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93974- atomic_set(&rcu_torture_wcount[i], 0);
93975+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93976 for_each_possible_cpu(cpu) {
93977 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93978 per_cpu(rcu_torture_count, cpu)[i] = 0;
93979diff --git a/kernel/rcutree.c b/kernel/rcutree.c
93980index 683c4f3..97f54c6 100644
93981--- a/kernel/rcutree.c
93982+++ b/kernel/rcutree.c
93983@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
93984 /*
93985 * Do softirq processing for the current CPU.
93986 */
93987-static void rcu_process_callbacks(struct softirq_action *unused)
93988+static void rcu_process_callbacks(void)
93989 {
93990 /*
93991 * Memory references from any prior RCU read-side critical sections
93992diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
93993index c03edf7..ac1b341 100644
93994--- a/kernel/rcutree_plugin.h
93995+++ b/kernel/rcutree_plugin.h
93996@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
93997 */
93998 void __rcu_read_lock(void)
93999 {
94000- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
94001+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
94002 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
94003 }
94004 EXPORT_SYMBOL_GPL(__rcu_read_lock);
94005@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
94006 struct task_struct *t = current;
94007
94008 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
94009- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
94010+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
94011 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
94012 rcu_read_unlock_special(t);
94013 }
94014diff --git a/kernel/relay.c b/kernel/relay.c
94015index bf343f5..908e9ee 100644
94016--- a/kernel/relay.c
94017+++ b/kernel/relay.c
94018@@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
94019 unsigned int flags,
94020 int *nonpad_ret)
94021 {
94022- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
94023+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
94024 struct rchan_buf *rbuf = in->private_data;
94025 unsigned int subbuf_size = rbuf->chan->subbuf_size;
94026 uint64_t pos = (uint64_t) *ppos;
94027@@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
94028 .ops = &relay_pipe_buf_ops,
94029 .spd_release = relay_page_release,
94030 };
94031+ ssize_t ret;
94032+
94033+ pax_track_stack();
94034
94035 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
94036 return 0;
94037diff --git a/kernel/resource.c b/kernel/resource.c
94038index fb11a58..4e61ae1 100644
94039--- a/kernel/resource.c
94040+++ b/kernel/resource.c
94041@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
94042
94043 static int __init ioresources_init(void)
94044 {
94045+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94046+#ifdef CONFIG_GRKERNSEC_PROC_USER
94047+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94048+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94049+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94050+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94051+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94052+#endif
94053+#else
94054 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94055 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94056+#endif
94057 return 0;
94058 }
94059 __initcall(ioresources_init);
94060diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
94061index a56f629..1fc4989 100644
94062--- a/kernel/rtmutex-tester.c
94063+++ b/kernel/rtmutex-tester.c
94064@@ -21,7 +21,7 @@
94065 #define MAX_RT_TEST_MUTEXES 8
94066
94067 static spinlock_t rttest_lock;
94068-static atomic_t rttest_event;
94069+static atomic_unchecked_t rttest_event;
94070
94071 struct test_thread_data {
94072 int opcode;
94073@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
94074
94075 case RTTEST_LOCKCONT:
94076 td->mutexes[td->opdata] = 1;
94077- td->event = atomic_add_return(1, &rttest_event);
94078+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94079 return 0;
94080
94081 case RTTEST_RESET:
94082@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
94083 return 0;
94084
94085 case RTTEST_RESETEVENT:
94086- atomic_set(&rttest_event, 0);
94087+ atomic_set_unchecked(&rttest_event, 0);
94088 return 0;
94089
94090 default:
94091@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
94092 return ret;
94093
94094 td->mutexes[id] = 1;
94095- td->event = atomic_add_return(1, &rttest_event);
94096+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94097 rt_mutex_lock(&mutexes[id]);
94098- td->event = atomic_add_return(1, &rttest_event);
94099+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94100 td->mutexes[id] = 4;
94101 return 0;
94102
94103@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
94104 return ret;
94105
94106 td->mutexes[id] = 1;
94107- td->event = atomic_add_return(1, &rttest_event);
94108+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94109 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
94110- td->event = atomic_add_return(1, &rttest_event);
94111+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94112 td->mutexes[id] = ret ? 0 : 4;
94113 return ret ? -EINTR : 0;
94114
94115@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
94116 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
94117 return ret;
94118
94119- td->event = atomic_add_return(1, &rttest_event);
94120+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94121 rt_mutex_unlock(&mutexes[id]);
94122- td->event = atomic_add_return(1, &rttest_event);
94123+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94124 td->mutexes[id] = 0;
94125 return 0;
94126
94127@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94128 break;
94129
94130 td->mutexes[dat] = 2;
94131- td->event = atomic_add_return(1, &rttest_event);
94132+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94133 break;
94134
94135 case RTTEST_LOCKBKL:
94136@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94137 return;
94138
94139 td->mutexes[dat] = 3;
94140- td->event = atomic_add_return(1, &rttest_event);
94141+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94142 break;
94143
94144 case RTTEST_LOCKNOWAIT:
94145@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94146 return;
94147
94148 td->mutexes[dat] = 1;
94149- td->event = atomic_add_return(1, &rttest_event);
94150+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94151 return;
94152
94153 case RTTEST_LOCKBKL:
94154diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
94155index 29bd4ba..8c5de90 100644
94156--- a/kernel/rtmutex.c
94157+++ b/kernel/rtmutex.c
94158@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
94159 */
94160 spin_lock_irqsave(&pendowner->pi_lock, flags);
94161
94162- WARN_ON(!pendowner->pi_blocked_on);
94163+ BUG_ON(!pendowner->pi_blocked_on);
94164 WARN_ON(pendowner->pi_blocked_on != waiter);
94165 WARN_ON(pendowner->pi_blocked_on->lock != lock);
94166
94167diff --git a/kernel/sched.c b/kernel/sched.c
94168index 0591df8..e3af3a4 100644
94169--- a/kernel/sched.c
94170+++ b/kernel/sched.c
94171@@ -5043,7 +5043,7 @@ out:
94172 * In CONFIG_NO_HZ case, the idle load balance owner will do the
94173 * rebalancing for all the cpus for whom scheduler ticks are stopped.
94174 */
94175-static void run_rebalance_domains(struct softirq_action *h)
94176+static void run_rebalance_domains(void)
94177 {
94178 int this_cpu = smp_processor_id();
94179 struct rq *this_rq = cpu_rq(this_cpu);
94180@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
94181 }
94182 }
94183
94184+#ifdef CONFIG_GRKERNSEC_SETXID
94185+extern void gr_delayed_cred_worker(void);
94186+static inline void gr_cred_schedule(void)
94187+{
94188+ if (unlikely(current->delayed_cred))
94189+ gr_delayed_cred_worker();
94190+}
94191+#else
94192+static inline void gr_cred_schedule(void)
94193+{
94194+}
94195+#endif
94196+
94197 /*
94198 * schedule() is the main scheduler function.
94199 */
94200@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
94201 struct rq *rq;
94202 int cpu;
94203
94204+ pax_track_stack();
94205+
94206 need_resched:
94207 preempt_disable();
94208 cpu = smp_processor_id();
94209@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
94210
94211 schedule_debug(prev);
94212
94213+ gr_cred_schedule();
94214+
94215 if (sched_feat(HRTICK))
94216 hrtick_clear(rq);
94217
94218@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
94219 * Look out! "owner" is an entirely speculative pointer
94220 * access and not reliable.
94221 */
94222-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94223+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
94224 {
94225 unsigned int cpu;
94226 struct rq *rq;
94227@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94228 * DEBUG_PAGEALLOC could have unmapped it if
94229 * the mutex owner just released it and exited.
94230 */
94231- if (probe_kernel_address(&owner->cpu, cpu))
94232+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
94233 return 0;
94234 #else
94235- cpu = owner->cpu;
94236+ cpu = task_thread_info(owner)->cpu;
94237 #endif
94238
94239 /*
94240@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94241 /*
94242 * Is that owner really running on that cpu?
94243 */
94244- if (task_thread_info(rq->curr) != owner || need_resched())
94245+ if (rq->curr != owner || need_resched())
94246 return 0;
94247
94248 cpu_relax();
94249@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
94250 /* convert nice value [19,-20] to rlimit style value [1,40] */
94251 int nice_rlim = 20 - nice;
94252
94253+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94254+
94255 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
94256 capable(CAP_SYS_NICE));
94257 }
94258@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94259 if (nice > 19)
94260 nice = 19;
94261
94262- if (increment < 0 && !can_nice(current, nice))
94263+ if (increment < 0 && (!can_nice(current, nice) ||
94264+ gr_handle_chroot_nice()))
94265 return -EPERM;
94266
94267 retval = security_task_setnice(current, nice);
94268@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
94269 long power;
94270 int weight;
94271
94272- WARN_ON(!sd || !sd->groups);
94273+ BUG_ON(!sd || !sd->groups);
94274
94275 if (cpu != group_first_cpu(sd->groups))
94276 return;
94277diff --git a/kernel/signal.c b/kernel/signal.c
94278index 2494827..cda80a0 100644
94279--- a/kernel/signal.c
94280+++ b/kernel/signal.c
94281@@ -41,12 +41,12 @@
94282
94283 static struct kmem_cache *sigqueue_cachep;
94284
94285-static void __user *sig_handler(struct task_struct *t, int sig)
94286+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94287 {
94288 return t->sighand->action[sig - 1].sa.sa_handler;
94289 }
94290
94291-static int sig_handler_ignored(void __user *handler, int sig)
94292+static int sig_handler_ignored(__sighandler_t handler, int sig)
94293 {
94294 /* Is it explicitly or implicitly ignored? */
94295 return handler == SIG_IGN ||
94296@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94297 static int sig_task_ignored(struct task_struct *t, int sig,
94298 int from_ancestor_ns)
94299 {
94300- void __user *handler;
94301+ __sighandler_t handler;
94302
94303 handler = sig_handler(t, sig);
94304
94305@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
94306 */
94307 user = get_uid(__task_cred(t)->user);
94308 atomic_inc(&user->sigpending);
94309+
94310+ if (!override_rlimit)
94311+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94312 if (override_rlimit ||
94313 atomic_read(&user->sigpending) <=
94314 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
94315@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94316
94317 int unhandled_signal(struct task_struct *tsk, int sig)
94318 {
94319- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94320+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94321 if (is_global_init(tsk))
94322 return 1;
94323 if (handler != SIG_IGN && handler != SIG_DFL)
94324@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94325 }
94326 }
94327
94328+ /* allow glibc communication via tgkill to other threads in our
94329+ thread group */
94330+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94331+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94332+ && gr_handle_signal(t, sig))
94333+ return -EPERM;
94334+
94335 return security_task_kill(t, info, sig, 0);
94336 }
94337
94338@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94339 return send_signal(sig, info, p, 1);
94340 }
94341
94342-static int
94343+int
94344 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94345 {
94346 return send_signal(sig, info, t, 0);
94347@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94348 unsigned long int flags;
94349 int ret, blocked, ignored;
94350 struct k_sigaction *action;
94351+ int is_unhandled = 0;
94352
94353 spin_lock_irqsave(&t->sighand->siglock, flags);
94354 action = &t->sighand->action[sig-1];
94355@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94356 }
94357 if (action->sa.sa_handler == SIG_DFL)
94358 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94359+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94360+ is_unhandled = 1;
94361 ret = specific_send_sig_info(sig, info, t);
94362 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94363
94364+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
94365+ normal operation */
94366+ if (is_unhandled) {
94367+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94368+ gr_handle_crash(t, sig);
94369+ }
94370+
94371 return ret;
94372 }
94373
94374@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94375 {
94376 int ret = check_kill_permission(sig, info, p);
94377
94378- if (!ret && sig)
94379+ if (!ret && sig) {
94380 ret = do_send_sig_info(sig, info, p, true);
94381+ if (!ret)
94382+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94383+ }
94384
94385 return ret;
94386 }
94387@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
94388 {
94389 siginfo_t info;
94390
94391+ pax_track_stack();
94392+
94393 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
94394
94395 memset(&info, 0, sizeof info);
94396@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94397 int error = -ESRCH;
94398
94399 rcu_read_lock();
94400- p = find_task_by_vpid(pid);
94401+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94402+ /* allow glibc communication via tgkill to other threads in our
94403+ thread group */
94404+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94405+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
94406+ p = find_task_by_vpid_unrestricted(pid);
94407+ else
94408+#endif
94409+ p = find_task_by_vpid(pid);
94410 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94411 error = check_kill_permission(sig, info, p);
94412 /*
94413diff --git a/kernel/smp.c b/kernel/smp.c
94414index aa9cff3..631a0de 100644
94415--- a/kernel/smp.c
94416+++ b/kernel/smp.c
94417@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
94418 }
94419 EXPORT_SYMBOL(smp_call_function);
94420
94421-void ipi_call_lock(void)
94422+void ipi_call_lock(void) __acquires(call_function.lock)
94423 {
94424 spin_lock(&call_function.lock);
94425 }
94426
94427-void ipi_call_unlock(void)
94428+void ipi_call_unlock(void) __releases(call_function.lock)
94429 {
94430 spin_unlock(&call_function.lock);
94431 }
94432
94433-void ipi_call_lock_irq(void)
94434+void ipi_call_lock_irq(void) __acquires(call_function.lock)
94435 {
94436 spin_lock_irq(&call_function.lock);
94437 }
94438
94439-void ipi_call_unlock_irq(void)
94440+void ipi_call_unlock_irq(void) __releases(call_function.lock)
94441 {
94442 spin_unlock_irq(&call_function.lock);
94443 }
94444diff --git a/kernel/softirq.c b/kernel/softirq.c
94445index 04a0252..580c512 100644
94446--- a/kernel/softirq.c
94447+++ b/kernel/softirq.c
94448@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
94449
94450 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94451
94452-char *softirq_to_name[NR_SOFTIRQS] = {
94453+const char * const softirq_to_name[NR_SOFTIRQS] = {
94454 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
94455 "TASKLET", "SCHED", "HRTIMER", "RCU"
94456 };
94457@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
94458
94459 asmlinkage void __do_softirq(void)
94460 {
94461- struct softirq_action *h;
94462+ const struct softirq_action *h;
94463 __u32 pending;
94464 int max_restart = MAX_SOFTIRQ_RESTART;
94465 int cpu;
94466@@ -233,7 +233,7 @@ restart:
94467 kstat_incr_softirqs_this_cpu(h - softirq_vec);
94468
94469 trace_softirq_entry(h, softirq_vec);
94470- h->action(h);
94471+ h->action();
94472 trace_softirq_exit(h, softirq_vec);
94473 if (unlikely(prev_count != preempt_count())) {
94474 printk(KERN_ERR "huh, entered softirq %td %s %p"
94475@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
94476 local_irq_restore(flags);
94477 }
94478
94479-void open_softirq(int nr, void (*action)(struct softirq_action *))
94480+void open_softirq(int nr, void (*action)(void))
94481 {
94482- softirq_vec[nr].action = action;
94483+ pax_open_kernel();
94484+ *(void **)&softirq_vec[nr].action = action;
94485+ pax_close_kernel();
94486 }
94487
94488 /*
94489@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94490
94491 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94492
94493-static void tasklet_action(struct softirq_action *a)
94494+static void tasklet_action(void)
94495 {
94496 struct tasklet_struct *list;
94497
94498@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
94499 }
94500 }
94501
94502-static void tasklet_hi_action(struct softirq_action *a)
94503+static void tasklet_hi_action(void)
94504 {
94505 struct tasklet_struct *list;
94506
94507diff --git a/kernel/sys.c b/kernel/sys.c
94508index e9512b1..f07185f 100644
94509--- a/kernel/sys.c
94510+++ b/kernel/sys.c
94511@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94512 error = -EACCES;
94513 goto out;
94514 }
94515+
94516+ if (gr_handle_chroot_setpriority(p, niceval)) {
94517+ error = -EACCES;
94518+ goto out;
94519+ }
94520+
94521 no_nice = security_task_setnice(p, niceval);
94522 if (no_nice) {
94523 error = no_nice;
94524@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
94525 !(user = find_user(who)))
94526 goto out_unlock; /* No processes for this user */
94527
94528- do_each_thread(g, p)
94529+ do_each_thread(g, p) {
94530 if (__task_cred(p)->uid == who)
94531 error = set_one_prio(p, niceval, error);
94532- while_each_thread(g, p);
94533+ } while_each_thread(g, p);
94534 if (who != cred->uid)
94535 free_uid(user); /* For find_user() */
94536 break;
94537@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
94538 !(user = find_user(who)))
94539 goto out_unlock; /* No processes for this user */
94540
94541- do_each_thread(g, p)
94542+ do_each_thread(g, p) {
94543 if (__task_cred(p)->uid == who) {
94544 niceval = 20 - task_nice(p);
94545 if (niceval > retval)
94546 retval = niceval;
94547 }
94548- while_each_thread(g, p);
94549+ } while_each_thread(g, p);
94550 if (who != cred->uid)
94551 free_uid(user); /* for find_user() */
94552 break;
94553@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94554 goto error;
94555 }
94556
94557+ if (gr_check_group_change(new->gid, new->egid, -1))
94558+ goto error;
94559+
94560 if (rgid != (gid_t) -1 ||
94561 (egid != (gid_t) -1 && egid != old->gid))
94562 new->sgid = new->egid;
94563@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94564 goto error;
94565
94566 retval = -EPERM;
94567+
94568+ if (gr_check_group_change(gid, gid, gid))
94569+ goto error;
94570+
94571 if (capable(CAP_SETGID))
94572 new->gid = new->egid = new->sgid = new->fsgid = gid;
94573 else if (gid == old->gid || gid == old->sgid)
94574@@ -559,7 +572,7 @@ error:
94575 /*
94576 * change the user struct in a credentials set to match the new UID
94577 */
94578-static int set_user(struct cred *new)
94579+int set_user(struct cred *new)
94580 {
94581 struct user_struct *new_user;
94582
94583@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
94584 if (!new_user)
94585 return -EAGAIN;
94586
94587+ /*
94588+ * We don't fail in case of NPROC limit excess here because too many
94589+ * poorly written programs don't check set*uid() return code, assuming
94590+ * it never fails if called by root. We may still enforce NPROC limit
94591+ * for programs doing set*uid()+execve() by harmlessly deferring the
94592+ * failure to the execve() stage.
94593+ */
94594 if (atomic_read(&new_user->processes) >=
94595 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
94596- new_user != INIT_USER) {
94597- free_uid(new_user);
94598- return -EAGAIN;
94599- }
94600+ new_user != INIT_USER)
94601+ current->flags |= PF_NPROC_EXCEEDED;
94602+ else
94603+ current->flags &= ~PF_NPROC_EXCEEDED;
94604
94605 free_uid(new->user);
94606 new->user = new_user;
94607@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94608 goto error;
94609 }
94610
94611+ if (gr_check_user_change(new->uid, new->euid, -1))
94612+ goto error;
94613+
94614 if (new->uid != old->uid) {
94615 retval = set_user(new);
94616 if (retval < 0)
94617@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94618 goto error;
94619
94620 retval = -EPERM;
94621+
94622+ if (gr_check_crash_uid(uid))
94623+ goto error;
94624+ if (gr_check_user_change(uid, uid, uid))
94625+ goto error;
94626+
94627 if (capable(CAP_SETUID)) {
94628 new->suid = new->uid = uid;
94629 if (uid != old->uid) {
94630@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94631 goto error;
94632 }
94633
94634+ if (gr_check_user_change(ruid, euid, -1))
94635+ goto error;
94636+
94637 if (ruid != (uid_t) -1) {
94638 new->uid = ruid;
94639 if (ruid != old->uid) {
94640@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94641 goto error;
94642 }
94643
94644+ if (gr_check_group_change(rgid, egid, -1))
94645+ goto error;
94646+
94647 if (rgid != (gid_t) -1)
94648 new->gid = rgid;
94649 if (egid != (gid_t) -1)
94650@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94651 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
94652 goto error;
94653
94654+ if (gr_check_user_change(-1, -1, uid))
94655+ goto error;
94656+
94657 if (uid == old->uid || uid == old->euid ||
94658 uid == old->suid || uid == old->fsuid ||
94659 capable(CAP_SETUID)) {
94660@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94661 if (gid == old->gid || gid == old->egid ||
94662 gid == old->sgid || gid == old->fsgid ||
94663 capable(CAP_SETGID)) {
94664+ if (gr_check_group_change(-1, -1, gid))
94665+ goto error;
94666+
94667 if (gid != old_fsgid) {
94668 new->fsgid = gid;
94669 goto change_okay;
94670@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
94671 error = get_dumpable(me->mm);
94672 break;
94673 case PR_SET_DUMPABLE:
94674- if (arg2 < 0 || arg2 > 1) {
94675+ if (arg2 > 1) {
94676 error = -EINVAL;
94677 break;
94678 }
94679diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94680index b8bd058..ab6a76be 100644
94681--- a/kernel/sysctl.c
94682+++ b/kernel/sysctl.c
94683@@ -63,6 +63,13 @@
94684 static int deprecated_sysctl_warning(struct __sysctl_args *args);
94685
94686 #if defined(CONFIG_SYSCTL)
94687+#include <linux/grsecurity.h>
94688+#include <linux/grinternal.h>
94689+
94690+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
94691+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
94692+ const int op);
94693+extern int gr_handle_chroot_sysctl(const int op);
94694
94695 /* External variables not in a header file. */
94696 extern int C_A_D;
94697@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
94698 static int proc_taint(struct ctl_table *table, int write,
94699 void __user *buffer, size_t *lenp, loff_t *ppos);
94700 #endif
94701+extern ctl_table grsecurity_table[];
94702
94703 static struct ctl_table root_table[];
94704 static struct ctl_table_root sysctl_table_root;
94705@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
94706 int sysctl_legacy_va_layout;
94707 #endif
94708
94709+#ifdef CONFIG_PAX_SOFTMODE
94710+static ctl_table pax_table[] = {
94711+ {
94712+ .ctl_name = CTL_UNNUMBERED,
94713+ .procname = "softmode",
94714+ .data = &pax_softmode,
94715+ .maxlen = sizeof(unsigned int),
94716+ .mode = 0600,
94717+ .proc_handler = &proc_dointvec,
94718+ },
94719+
94720+ { .ctl_name = 0 }
94721+};
94722+#endif
94723+
94724 extern int prove_locking;
94725 extern int lock_stat;
94726
94727@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
94728 #endif
94729
94730 static struct ctl_table kern_table[] = {
94731+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
94732+ {
94733+ .ctl_name = CTL_UNNUMBERED,
94734+ .procname = "grsecurity",
94735+ .mode = 0500,
94736+ .child = grsecurity_table,
94737+ },
94738+#endif
94739+
94740+#ifdef CONFIG_PAX_SOFTMODE
94741+ {
94742+ .ctl_name = CTL_UNNUMBERED,
94743+ .procname = "pax",
94744+ .mode = 0500,
94745+ .child = pax_table,
94746+ },
94747+#endif
94748+
94749 {
94750 .ctl_name = CTL_UNNUMBERED,
94751 .procname = "sched_child_runs_first",
94752@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
94753 .data = &modprobe_path,
94754 .maxlen = KMOD_PATH_LEN,
94755 .mode = 0644,
94756- .proc_handler = &proc_dostring,
94757- .strategy = &sysctl_string,
94758+ .proc_handler = &proc_dostring_modpriv,
94759+ .strategy = &sysctl_string_modpriv,
94760 },
94761 {
94762 .ctl_name = CTL_UNNUMBERED,
94763@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
94764 .mode = 0644,
94765 .proc_handler = &proc_dointvec
94766 },
94767+ {
94768+ .procname = "heap_stack_gap",
94769+ .data = &sysctl_heap_stack_gap,
94770+ .maxlen = sizeof(sysctl_heap_stack_gap),
94771+ .mode = 0644,
94772+ .proc_handler = proc_doulongvec_minmax,
94773+ },
94774 #else
94775 {
94776 .ctl_name = CTL_UNNUMBERED,
94777@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
94778 return 0;
94779 }
94780
94781+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
94782+
94783 static int parse_table(int __user *name, int nlen,
94784 void __user *oldval, size_t __user *oldlenp,
94785 void __user *newval, size_t newlen,
94786@@ -1821,7 +1871,7 @@ repeat:
94787 if (n == table->ctl_name) {
94788 int error;
94789 if (table->child) {
94790- if (sysctl_perm(root, table, MAY_EXEC))
94791+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
94792 return -EPERM;
94793 name++;
94794 nlen--;
94795@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
94796 int error;
94797 int mode;
94798
94799+ if (table->parent != NULL && table->parent->procname != NULL &&
94800+ table->procname != NULL &&
94801+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
94802+ return -EACCES;
94803+ if (gr_handle_chroot_sysctl(op))
94804+ return -EACCES;
94805+ error = gr_handle_sysctl(table, op);
94806+ if (error)
94807+ return error;
94808+
94809+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94810+ if (error)
94811+ return error;
94812+
94813+ if (root->permissions)
94814+ mode = root->permissions(root, current->nsproxy, table);
94815+ else
94816+ mode = table->mode;
94817+
94818+ return test_perm(mode, op);
94819+}
94820+
94821+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
94822+{
94823+ int error;
94824+ int mode;
94825+
94826 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94827 if (error)
94828 return error;
94829@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
94830 buffer, lenp, ppos);
94831 }
94832
94833+int proc_dostring_modpriv(struct ctl_table *table, int write,
94834+ void __user *buffer, size_t *lenp, loff_t *ppos)
94835+{
94836+ if (write && !capable(CAP_SYS_MODULE))
94837+ return -EPERM;
94838+
94839+ return _proc_do_string(table->data, table->maxlen, write,
94840+ buffer, lenp, ppos);
94841+}
94842+
94843
94844 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
94845 int *valp,
94846@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
94847 vleft = table->maxlen / sizeof(unsigned long);
94848 left = *lenp;
94849
94850- for (; left && vleft--; i++, min++, max++, first=0) {
94851+ for (; left && vleft--; i++, first=0) {
94852 if (write) {
94853 while (left) {
94854 char c;
94855@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
94856 return -ENOSYS;
94857 }
94858
94859+int proc_dostring_modpriv(struct ctl_table *table, int write,
94860+ void __user *buffer, size_t *lenp, loff_t *ppos)
94861+{
94862+ return -ENOSYS;
94863+}
94864+
94865 int proc_dointvec(struct ctl_table *table, int write,
94866 void __user *buffer, size_t *lenp, loff_t *ppos)
94867 {
94868@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
94869 return 1;
94870 }
94871
94872+int sysctl_string_modpriv(struct ctl_table *table,
94873+ void __user *oldval, size_t __user *oldlenp,
94874+ void __user *newval, size_t newlen)
94875+{
94876+ if (newval && newlen && !capable(CAP_SYS_MODULE))
94877+ return -EPERM;
94878+
94879+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
94880+}
94881+
94882 /*
94883 * This function makes sure that all of the integers in the vector
94884 * are between the minimum and maximum values given in the arrays
94885@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
94886 return -ENOSYS;
94887 }
94888
94889+int sysctl_string_modpriv(struct ctl_table *table,
94890+ void __user *oldval, size_t __user *oldlenp,
94891+ void __user *newval, size_t newlen)
94892+{
94893+ return -ENOSYS;
94894+}
94895+
94896 int sysctl_intvec(struct ctl_table *table,
94897 void __user *oldval, size_t __user *oldlenp,
94898 void __user *newval, size_t newlen)
94899@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94900 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94901 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94902 EXPORT_SYMBOL(proc_dostring);
94903+EXPORT_SYMBOL(proc_dostring_modpriv);
94904 EXPORT_SYMBOL(proc_doulongvec_minmax);
94905 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94906 EXPORT_SYMBOL(register_sysctl_table);
94907@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
94908 EXPORT_SYMBOL(sysctl_jiffies);
94909 EXPORT_SYMBOL(sysctl_ms_jiffies);
94910 EXPORT_SYMBOL(sysctl_string);
94911+EXPORT_SYMBOL(sysctl_string_modpriv);
94912 EXPORT_SYMBOL(sysctl_data);
94913 EXPORT_SYMBOL(unregister_sysctl_table);
94914diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
94915index 469193c..ea3ecb2 100644
94916--- a/kernel/sysctl_check.c
94917+++ b/kernel/sysctl_check.c
94918@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
94919 } else {
94920 if ((table->strategy == sysctl_data) ||
94921 (table->strategy == sysctl_string) ||
94922+ (table->strategy == sysctl_string_modpriv) ||
94923 (table->strategy == sysctl_intvec) ||
94924 (table->strategy == sysctl_jiffies) ||
94925 (table->strategy == sysctl_ms_jiffies) ||
94926 (table->proc_handler == proc_dostring) ||
94927+ (table->proc_handler == proc_dostring_modpriv) ||
94928 (table->proc_handler == proc_dointvec) ||
94929 (table->proc_handler == proc_dointvec_minmax) ||
94930 (table->proc_handler == proc_dointvec_jiffies) ||
94931diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94932index a4ef542..798bcd7 100644
94933--- a/kernel/taskstats.c
94934+++ b/kernel/taskstats.c
94935@@ -26,9 +26,12 @@
94936 #include <linux/cgroup.h>
94937 #include <linux/fs.h>
94938 #include <linux/file.h>
94939+#include <linux/grsecurity.h>
94940 #include <net/genetlink.h>
94941 #include <asm/atomic.h>
94942
94943+extern int gr_is_taskstats_denied(int pid);
94944+
94945 /*
94946 * Maximum length of a cpumask that can be specified in
94947 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94948@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94949 size_t size;
94950 cpumask_var_t mask;
94951
94952+ if (gr_is_taskstats_denied(current->pid))
94953+ return -EACCES;
94954+
94955 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
94956 return -ENOMEM;
94957
94958diff --git a/kernel/time.c b/kernel/time.c
94959index 33df60e..ca768bd 100644
94960--- a/kernel/time.c
94961+++ b/kernel/time.c
94962@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
94963 return error;
94964
94965 if (tz) {
94966+ /* we log in do_settimeofday called below, so don't log twice
94967+ */
94968+ if (!tv)
94969+ gr_log_timechange();
94970+
94971 /* SMP safe, global irq locking makes it work. */
94972 sys_tz = *tz;
94973 update_vsyscall_tz();
94974@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
94975 * Avoid unnecessary multiplications/divisions in the
94976 * two most common HZ cases:
94977 */
94978-unsigned int inline jiffies_to_msecs(const unsigned long j)
94979+inline unsigned int jiffies_to_msecs(const unsigned long j)
94980 {
94981 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
94982 return (MSEC_PER_SEC / HZ) * j;
94983@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
94984 }
94985 EXPORT_SYMBOL(jiffies_to_msecs);
94986
94987-unsigned int inline jiffies_to_usecs(const unsigned long j)
94988+inline unsigned int jiffies_to_usecs(const unsigned long j)
94989 {
94990 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
94991 return (USEC_PER_SEC / HZ) * j;
94992diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
94993index 57b953f..06f149f 100644
94994--- a/kernel/time/tick-broadcast.c
94995+++ b/kernel/time/tick-broadcast.c
94996@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
94997 * then clear the broadcast bit.
94998 */
94999 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
95000- int cpu = smp_processor_id();
95001+ cpu = smp_processor_id();
95002
95003 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
95004 tick_broadcast_clear_oneshot(cpu);
95005diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
95006index 4a71cff..ffb5548 100644
95007--- a/kernel/time/timekeeping.c
95008+++ b/kernel/time/timekeeping.c
95009@@ -14,6 +14,7 @@
95010 #include <linux/init.h>
95011 #include <linux/mm.h>
95012 #include <linux/sched.h>
95013+#include <linux/grsecurity.h>
95014 #include <linux/sysdev.h>
95015 #include <linux/clocksource.h>
95016 #include <linux/jiffies.h>
95017@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
95018 */
95019 struct timespec ts = xtime;
95020 timespec_add_ns(&ts, nsec);
95021- ACCESS_ONCE(xtime_cache) = ts;
95022+ ACCESS_ONCE_RW(xtime_cache) = ts;
95023 }
95024
95025 /* must hold xtime_lock */
95026@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
95027 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
95028 return -EINVAL;
95029
95030+ gr_log_timechange();
95031+
95032 write_seqlock_irqsave(&xtime_lock, flags);
95033
95034 timekeeping_forward_now();
95035diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
95036index 54c0dda..e9095d9 100644
95037--- a/kernel/time/timer_list.c
95038+++ b/kernel/time/timer_list.c
95039@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
95040
95041 static void print_name_offset(struct seq_file *m, void *sym)
95042 {
95043+#ifdef CONFIG_GRKERNSEC_HIDESYM
95044+ SEQ_printf(m, "<%p>", NULL);
95045+#else
95046 char symname[KSYM_NAME_LEN];
95047
95048 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
95049 SEQ_printf(m, "<%p>", sym);
95050 else
95051 SEQ_printf(m, "%s", symname);
95052+#endif
95053 }
95054
95055 static void
95056@@ -112,7 +116,11 @@ next_one:
95057 static void
95058 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
95059 {
95060+#ifdef CONFIG_GRKERNSEC_HIDESYM
95061+ SEQ_printf(m, " .base: %p\n", NULL);
95062+#else
95063 SEQ_printf(m, " .base: %p\n", base);
95064+#endif
95065 SEQ_printf(m, " .index: %d\n",
95066 base->index);
95067 SEQ_printf(m, " .resolution: %Lu nsecs\n",
95068@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
95069 {
95070 struct proc_dir_entry *pe;
95071
95072+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95073+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
95074+#else
95075 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
95076+#endif
95077 if (!pe)
95078 return -ENOMEM;
95079 return 0;
95080diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
95081index ee5681f..634089b 100644
95082--- a/kernel/time/timer_stats.c
95083+++ b/kernel/time/timer_stats.c
95084@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
95085 static unsigned long nr_entries;
95086 static struct entry entries[MAX_ENTRIES];
95087
95088-static atomic_t overflow_count;
95089+static atomic_unchecked_t overflow_count;
95090
95091 /*
95092 * The entries are in a hash-table, for fast lookup:
95093@@ -140,7 +140,7 @@ static void reset_entries(void)
95094 nr_entries = 0;
95095 memset(entries, 0, sizeof(entries));
95096 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
95097- atomic_set(&overflow_count, 0);
95098+ atomic_set_unchecked(&overflow_count, 0);
95099 }
95100
95101 static struct entry *alloc_entry(void)
95102@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95103 if (likely(entry))
95104 entry->count++;
95105 else
95106- atomic_inc(&overflow_count);
95107+ atomic_inc_unchecked(&overflow_count);
95108
95109 out_unlock:
95110 spin_unlock_irqrestore(lock, flags);
95111@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95112
95113 static void print_name_offset(struct seq_file *m, unsigned long addr)
95114 {
95115+#ifdef CONFIG_GRKERNSEC_HIDESYM
95116+ seq_printf(m, "<%p>", NULL);
95117+#else
95118 char symname[KSYM_NAME_LEN];
95119
95120 if (lookup_symbol_name(addr, symname) < 0)
95121 seq_printf(m, "<%p>", (void *)addr);
95122 else
95123 seq_printf(m, "%s", symname);
95124+#endif
95125 }
95126
95127 static int tstats_show(struct seq_file *m, void *v)
95128@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
95129
95130 seq_puts(m, "Timer Stats Version: v0.2\n");
95131 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
95132- if (atomic_read(&overflow_count))
95133+ if (atomic_read_unchecked(&overflow_count))
95134 seq_printf(m, "Overflow: %d entries\n",
95135- atomic_read(&overflow_count));
95136+ atomic_read_unchecked(&overflow_count));
95137
95138 for (i = 0; i < nr_entries; i++) {
95139 entry = entries + i;
95140@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
95141 {
95142 struct proc_dir_entry *pe;
95143
95144+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95145+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95146+#else
95147 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95148+#endif
95149 if (!pe)
95150 return -ENOMEM;
95151 return 0;
95152diff --git a/kernel/timer.c b/kernel/timer.c
95153index cb3c1f1..8bf5526 100644
95154--- a/kernel/timer.c
95155+++ b/kernel/timer.c
95156@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
95157 /*
95158 * This function runs timers and the timer-tq in bottom half context.
95159 */
95160-static void run_timer_softirq(struct softirq_action *h)
95161+static void run_timer_softirq(void)
95162 {
95163 struct tvec_base *base = __get_cpu_var(tvec_bases);
95164
95165diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95166index d9d6206..f19467e 100644
95167--- a/kernel/trace/blktrace.c
95168+++ b/kernel/trace/blktrace.c
95169@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95170 struct blk_trace *bt = filp->private_data;
95171 char buf[16];
95172
95173- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95174+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95175
95176 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95177 }
95178@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95179 return 1;
95180
95181 bt = buf->chan->private_data;
95182- atomic_inc(&bt->dropped);
95183+ atomic_inc_unchecked(&bt->dropped);
95184 return 0;
95185 }
95186
95187@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95188
95189 bt->dir = dir;
95190 bt->dev = dev;
95191- atomic_set(&bt->dropped, 0);
95192+ atomic_set_unchecked(&bt->dropped, 0);
95193
95194 ret = -EIO;
95195 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
95196diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95197index 4872937..c794d40 100644
95198--- a/kernel/trace/ftrace.c
95199+++ b/kernel/trace/ftrace.c
95200@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95201
95202 ip = rec->ip;
95203
95204+ ret = ftrace_arch_code_modify_prepare();
95205+ FTRACE_WARN_ON(ret);
95206+ if (ret)
95207+ return 0;
95208+
95209 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95210+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95211 if (ret) {
95212 ftrace_bug(ret, ip);
95213 rec->flags |= FTRACE_FL_FAILED;
95214- return 0;
95215 }
95216- return 1;
95217+ return ret ? 0 : 1;
95218 }
95219
95220 /*
95221diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95222index e749a05..19c6e94 100644
95223--- a/kernel/trace/ring_buffer.c
95224+++ b/kernel/trace/ring_buffer.c
95225@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
95226 * the reader page). But if the next page is a header page,
95227 * its flags will be non zero.
95228 */
95229-static int inline
95230+static inline int
95231 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95232 struct buffer_page *page, struct list_head *list)
95233 {
95234diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95235index a2a2d1f..7f32b09 100644
95236--- a/kernel/trace/trace.c
95237+++ b/kernel/trace/trace.c
95238@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
95239 size_t rem;
95240 unsigned int i;
95241
95242+ pax_track_stack();
95243+
95244 /* copy the tracer to avoid using a global lock all around */
95245 mutex_lock(&trace_types_lock);
95246 if (unlikely(old_tracer != current_trace && current_trace)) {
95247@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
95248 int entries, size, i;
95249 size_t ret;
95250
95251+ pax_track_stack();
95252+
95253 if (*ppos & (PAGE_SIZE - 1)) {
95254 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
95255 return -EINVAL;
95256@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
95257 };
95258 #endif
95259
95260-static struct dentry *d_tracer;
95261-
95262 struct dentry *tracing_init_dentry(void)
95263 {
95264+ static struct dentry *d_tracer;
95265 static int once;
95266
95267 if (d_tracer)
95268@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
95269 return d_tracer;
95270 }
95271
95272-static struct dentry *d_percpu;
95273-
95274 struct dentry *tracing_dentry_percpu(void)
95275 {
95276+ static struct dentry *d_percpu;
95277 static int once;
95278 struct dentry *d_tracer;
95279
95280diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95281index d128f65..f37b4af 100644
95282--- a/kernel/trace/trace_events.c
95283+++ b/kernel/trace/trace_events.c
95284@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
95285 * Modules must own their file_operations to keep up with
95286 * reference counting.
95287 */
95288+
95289 struct ftrace_module_file_ops {
95290 struct list_head list;
95291 struct module *mod;
95292- struct file_operations id;
95293- struct file_operations enable;
95294- struct file_operations format;
95295- struct file_operations filter;
95296 };
95297
95298 static void remove_subsystem_dir(const char *name)
95299@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
95300
95301 file_ops->mod = mod;
95302
95303- file_ops->id = ftrace_event_id_fops;
95304- file_ops->id.owner = mod;
95305-
95306- file_ops->enable = ftrace_enable_fops;
95307- file_ops->enable.owner = mod;
95308-
95309- file_ops->filter = ftrace_event_filter_fops;
95310- file_ops->filter.owner = mod;
95311-
95312- file_ops->format = ftrace_event_format_fops;
95313- file_ops->format.owner = mod;
95314+ pax_open_kernel();
95315+ *(void **)&mod->trace_id.owner = mod;
95316+ *(void **)&mod->trace_enable.owner = mod;
95317+ *(void **)&mod->trace_filter.owner = mod;
95318+ *(void **)&mod->trace_format.owner = mod;
95319+ pax_close_kernel();
95320
95321 list_add(&file_ops->list, &ftrace_module_file_list);
95322
95323@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
95324 call->mod = mod;
95325 list_add(&call->list, &ftrace_events);
95326 event_create_dir(call, d_events,
95327- &file_ops->id, &file_ops->enable,
95328- &file_ops->filter, &file_ops->format);
95329+ &mod->trace_id, &mod->trace_enable,
95330+ &mod->trace_filter, &mod->trace_format);
95331 }
95332 }
95333
95334diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95335index 0acd834..b800b56 100644
95336--- a/kernel/trace/trace_mmiotrace.c
95337+++ b/kernel/trace/trace_mmiotrace.c
95338@@ -23,7 +23,7 @@ struct header_iter {
95339 static struct trace_array *mmio_trace_array;
95340 static bool overrun_detected;
95341 static unsigned long prev_overruns;
95342-static atomic_t dropped_count;
95343+static atomic_unchecked_t dropped_count;
95344
95345 static void mmio_reset_data(struct trace_array *tr)
95346 {
95347@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
95348
95349 static unsigned long count_overruns(struct trace_iterator *iter)
95350 {
95351- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95352+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95353 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
95354
95355 if (over > prev_overruns)
95356@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95357 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95358 sizeof(*entry), 0, pc);
95359 if (!event) {
95360- atomic_inc(&dropped_count);
95361+ atomic_inc_unchecked(&dropped_count);
95362 return;
95363 }
95364 entry = ring_buffer_event_data(event);
95365@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95366 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95367 sizeof(*entry), 0, pc);
95368 if (!event) {
95369- atomic_inc(&dropped_count);
95370+ atomic_inc_unchecked(&dropped_count);
95371 return;
95372 }
95373 entry = ring_buffer_event_data(event);
95374diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95375index b6c12c6..41fdc53 100644
95376--- a/kernel/trace/trace_output.c
95377+++ b/kernel/trace/trace_output.c
95378@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
95379 return 0;
95380 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
95381 if (!IS_ERR(p)) {
95382- p = mangle_path(s->buffer + s->len, p, "\n");
95383+ p = mangle_path(s->buffer + s->len, p, "\n\\");
95384 if (p) {
95385 s->len = p - s->buffer;
95386 return 1;
95387diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95388index 8504ac7..ecf0adb 100644
95389--- a/kernel/trace/trace_stack.c
95390+++ b/kernel/trace/trace_stack.c
95391@@ -50,7 +50,7 @@ static inline void check_stack(void)
95392 return;
95393
95394 /* we do not handle interrupt stacks yet */
95395- if (!object_is_on_stack(&this_size))
95396+ if (!object_starts_on_stack(&this_size))
95397 return;
95398
95399 local_irq_save(flags);
95400diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
95401index 40cafb0..d5ead43 100644
95402--- a/kernel/trace/trace_workqueue.c
95403+++ b/kernel/trace/trace_workqueue.c
95404@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
95405 int cpu;
95406 pid_t pid;
95407 /* Can be inserted from interrupt or user context, need to be atomic */
95408- atomic_t inserted;
95409+ atomic_unchecked_t inserted;
95410 /*
95411 * Don't need to be atomic, works are serialized in a single workqueue thread
95412 * on a single CPU.
95413@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
95414 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
95415 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
95416 if (node->pid == wq_thread->pid) {
95417- atomic_inc(&node->inserted);
95418+ atomic_inc_unchecked(&node->inserted);
95419 goto found;
95420 }
95421 }
95422@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
95423 tsk = get_pid_task(pid, PIDTYPE_PID);
95424 if (tsk) {
95425 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
95426- atomic_read(&cws->inserted), cws->executed,
95427+ atomic_read_unchecked(&cws->inserted), cws->executed,
95428 tsk->comm);
95429 put_task_struct(tsk);
95430 }
95431diff --git a/kernel/user.c b/kernel/user.c
95432index 1b91701..8795237 100644
95433--- a/kernel/user.c
95434+++ b/kernel/user.c
95435@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
95436 spin_lock_irq(&uidhash_lock);
95437 up = uid_hash_find(uid, hashent);
95438 if (up) {
95439+ put_user_ns(ns);
95440 key_put(new->uid_keyring);
95441 key_put(new->session_keyring);
95442 kmem_cache_free(uid_cachep, new);
95443diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95444index 234ceb1..ad74049 100644
95445--- a/lib/Kconfig.debug
95446+++ b/lib/Kconfig.debug
95447@@ -905,7 +905,7 @@ config LATENCYTOP
95448 select STACKTRACE
95449 select SCHEDSTATS
95450 select SCHED_DEBUG
95451- depends on HAVE_LATENCYTOP_SUPPORT
95452+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
95453 help
95454 Enable this option if you want to use the LatencyTOP tool
95455 to find out which userspace is blocking on what kernel operations.
95456diff --git a/lib/bitmap.c b/lib/bitmap.c
95457index 7025658..8d14cab 100644
95458--- a/lib/bitmap.c
95459+++ b/lib/bitmap.c
95460@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95461 {
95462 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95463 u32 chunk;
95464- const char __user *ubuf = buf;
95465+ const char __user *ubuf = (const char __force_user *)buf;
95466
95467 bitmap_zero(maskp, nmaskbits);
95468
95469@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
95470 {
95471 if (!access_ok(VERIFY_READ, ubuf, ulen))
95472 return -EFAULT;
95473- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
95474+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
95475 }
95476 EXPORT_SYMBOL(bitmap_parse_user);
95477
95478diff --git a/lib/bug.c b/lib/bug.c
95479index 300e41a..2779eb0 100644
95480--- a/lib/bug.c
95481+++ b/lib/bug.c
95482@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95483 return BUG_TRAP_TYPE_NONE;
95484
95485 bug = find_bug(bugaddr);
95486+ if (!bug)
95487+ return BUG_TRAP_TYPE_NONE;
95488
95489 printk(KERN_EMERG "------------[ cut here ]------------\n");
95490
95491diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95492index 2b413db..e21d207 100644
95493--- a/lib/debugobjects.c
95494+++ b/lib/debugobjects.c
95495@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95496 if (limit > 4)
95497 return;
95498
95499- is_on_stack = object_is_on_stack(addr);
95500+ is_on_stack = object_starts_on_stack(addr);
95501 if (is_on_stack == onstack)
95502 return;
95503
95504diff --git a/lib/devres.c b/lib/devres.c
95505index 72c8909..7543868 100644
95506--- a/lib/devres.c
95507+++ b/lib/devres.c
95508@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
95509 {
95510 iounmap(addr);
95511 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
95512- (void *)addr));
95513+ (void __force *)addr));
95514 }
95515 EXPORT_SYMBOL(devm_iounmap);
95516
95517@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
95518 {
95519 ioport_unmap(addr);
95520 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
95521- devm_ioport_map_match, (void *)addr));
95522+ devm_ioport_map_match, (void __force *)addr));
95523 }
95524 EXPORT_SYMBOL(devm_ioport_unmap);
95525
95526diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95527index 084e879..0674448 100644
95528--- a/lib/dma-debug.c
95529+++ b/lib/dma-debug.c
95530@@ -861,7 +861,7 @@ out:
95531
95532 static void check_for_stack(struct device *dev, void *addr)
95533 {
95534- if (object_is_on_stack(addr))
95535+ if (object_starts_on_stack(addr))
95536 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
95537 "stack [addr=%p]\n", addr);
95538 }
95539diff --git a/lib/idr.c b/lib/idr.c
95540index eda7ba3..915dfae 100644
95541--- a/lib/idr.c
95542+++ b/lib/idr.c
95543@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
95544 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
95545
95546 /* if already at the top layer, we need to grow */
95547- if (id >= 1 << (idp->layers * IDR_BITS)) {
95548+ if (id >= (1 << (idp->layers * IDR_BITS))) {
95549 *starting_id = id;
95550 return IDR_NEED_TO_GROW;
95551 }
95552diff --git a/lib/inflate.c b/lib/inflate.c
95553index d102559..4215f31 100644
95554--- a/lib/inflate.c
95555+++ b/lib/inflate.c
95556@@ -266,7 +266,7 @@ static void free(void *where)
95557 malloc_ptr = free_mem_ptr;
95558 }
95559 #else
95560-#define malloc(a) kmalloc(a, GFP_KERNEL)
95561+#define malloc(a) kmalloc((a), GFP_KERNEL)
95562 #define free(a) kfree(a)
95563 #endif
95564
95565diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95566index bd2bea9..6b3c95e 100644
95567--- a/lib/is_single_threaded.c
95568+++ b/lib/is_single_threaded.c
95569@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95570 struct task_struct *p, *t;
95571 bool ret;
95572
95573+ if (!mm)
95574+ return true;
95575+
95576 if (atomic_read(&task->signal->live) != 1)
95577 return false;
95578
95579diff --git a/lib/kobject.c b/lib/kobject.c
95580index b512b74..8115eb1 100644
95581--- a/lib/kobject.c
95582+++ b/lib/kobject.c
95583@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
95584 return ret;
95585 }
95586
95587-struct sysfs_ops kobj_sysfs_ops = {
95588+const struct sysfs_ops kobj_sysfs_ops = {
95589 .show = kobj_attr_show,
95590 .store = kobj_attr_store,
95591 };
95592@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
95593 * If the kset was not able to be created, NULL will be returned.
95594 */
95595 static struct kset *kset_create(const char *name,
95596- struct kset_uevent_ops *uevent_ops,
95597+ const struct kset_uevent_ops *uevent_ops,
95598 struct kobject *parent_kobj)
95599 {
95600 struct kset *kset;
95601@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
95602 * If the kset was not able to be created, NULL will be returned.
95603 */
95604 struct kset *kset_create_and_add(const char *name,
95605- struct kset_uevent_ops *uevent_ops,
95606+ const struct kset_uevent_ops *uevent_ops,
95607 struct kobject *parent_kobj)
95608 {
95609 struct kset *kset;
95610diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
95611index 507b821..0bf8ed0 100644
95612--- a/lib/kobject_uevent.c
95613+++ b/lib/kobject_uevent.c
95614@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
95615 const char *subsystem;
95616 struct kobject *top_kobj;
95617 struct kset *kset;
95618- struct kset_uevent_ops *uevent_ops;
95619+ const struct kset_uevent_ops *uevent_ops;
95620 u64 seq;
95621 int i = 0;
95622 int retval = 0;
95623diff --git a/lib/kref.c b/lib/kref.c
95624index 9ecd6e8..12c94c1 100644
95625--- a/lib/kref.c
95626+++ b/lib/kref.c
95627@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
95628 */
95629 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
95630 {
95631- WARN_ON(release == NULL);
95632+ BUG_ON(release == NULL);
95633 WARN_ON(release == (void (*)(struct kref *))kfree);
95634
95635 if (atomic_dec_and_test(&kref->refcount)) {
95636diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95637index 92cdd99..a8149d7 100644
95638--- a/lib/radix-tree.c
95639+++ b/lib/radix-tree.c
95640@@ -81,7 +81,7 @@ struct radix_tree_preload {
95641 int nr;
95642 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
95643 };
95644-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95645+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95646
95647 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
95648 {
95649diff --git a/lib/random32.c b/lib/random32.c
95650index 217d5c4..45aba8a 100644
95651--- a/lib/random32.c
95652+++ b/lib/random32.c
95653@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
95654 */
95655 static inline u32 __seed(u32 x, u32 m)
95656 {
95657- return (x < m) ? x + m : x;
95658+ return (x <= m) ? x + m + 1 : x;
95659 }
95660
95661 /**
95662diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95663index 33bed5e..1477e46 100644
95664--- a/lib/vsprintf.c
95665+++ b/lib/vsprintf.c
95666@@ -16,6 +16,9 @@
95667 * - scnprintf and vscnprintf
95668 */
95669
95670+#ifdef CONFIG_GRKERNSEC_HIDESYM
95671+#define __INCLUDED_BY_HIDESYM 1
95672+#endif
95673 #include <stdarg.h>
95674 #include <linux/module.h>
95675 #include <linux/types.h>
95676@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
95677 return buf;
95678 }
95679
95680-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
95681+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
95682 {
95683 int len, i;
95684
95685 if ((unsigned long)s < PAGE_SIZE)
95686- s = "<NULL>";
95687+ s = "(null)";
95688
95689 len = strnlen(s, spec.precision);
95690
95691@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
95692 unsigned long value = (unsigned long) ptr;
95693 #ifdef CONFIG_KALLSYMS
95694 char sym[KSYM_SYMBOL_LEN];
95695- if (ext != 'f' && ext != 's')
95696+ if (ext != 'f' && ext != 's' && ext != 'a')
95697 sprint_symbol(sym, value);
95698 else
95699 kallsyms_lookup(value, NULL, NULL, NULL, sym);
95700@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
95701 * - 'f' For simple symbolic function names without offset
95702 * - 'S' For symbolic direct pointers with offset
95703 * - 's' For symbolic direct pointers without offset
95704+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95705+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
95706 * - 'R' For a struct resource pointer, it prints the range of
95707 * addresses (not the name nor the flags)
95708 * - 'M' For a 6-byte MAC address, it prints the address in the
95709@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95710 struct printf_spec spec)
95711 {
95712 if (!ptr)
95713- return string(buf, end, "(null)", spec);
95714+ return string(buf, end, "(nil)", spec);
95715
95716 switch (*fmt) {
95717 case 'F':
95718@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95719 case 's':
95720 /* Fallthrough */
95721 case 'S':
95722+#ifdef CONFIG_GRKERNSEC_HIDESYM
95723+ break;
95724+#else
95725+ return symbol_string(buf, end, ptr, spec, *fmt);
95726+#endif
95727+ case 'a':
95728+ /* Fallthrough */
95729+ case 'A':
95730 return symbol_string(buf, end, ptr, spec, *fmt);
95731 case 'R':
95732 return resource_string(buf, end, ptr, spec);
95733@@ -1445,7 +1458,7 @@ do { \
95734 size_t len;
95735 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
95736 || (unsigned long)save_str < PAGE_SIZE)
95737- save_str = "<NULL>";
95738+ save_str = "(null)";
95739 len = strlen(save_str);
95740 if (str + len + 1 < end)
95741 memcpy(str, save_str, len + 1);
95742@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95743 typeof(type) value; \
95744 if (sizeof(type) == 8) { \
95745 args = PTR_ALIGN(args, sizeof(u32)); \
95746- *(u32 *)&value = *(u32 *)args; \
95747- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95748+ *(u32 *)&value = *(const u32 *)args; \
95749+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95750 } else { \
95751 args = PTR_ALIGN(args, sizeof(type)); \
95752- value = *(typeof(type) *)args; \
95753+ value = *(const typeof(type) *)args; \
95754 } \
95755 args += sizeof(type); \
95756 value; \
95757@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95758 const char *str_arg = args;
95759 size_t len = strlen(str_arg);
95760 args += len + 1;
95761- str = string(str, end, (char *)str_arg, spec);
95762+ str = string(str, end, str_arg, spec);
95763 break;
95764 }
95765
95766diff --git a/localversion-grsec b/localversion-grsec
95767new file mode 100644
95768index 0000000..7cd6065
95769--- /dev/null
95770+++ b/localversion-grsec
95771@@ -0,0 +1 @@
95772+-grsec
95773diff --git a/mm/Kconfig b/mm/Kconfig
95774index 2c19c0b..f3c3f83 100644
95775--- a/mm/Kconfig
95776+++ b/mm/Kconfig
95777@@ -228,7 +228,7 @@ config KSM
95778 config DEFAULT_MMAP_MIN_ADDR
95779 int "Low address space to protect from user allocation"
95780 depends on MMU
95781- default 4096
95782+ default 65536
95783 help
95784 This is the portion of low virtual memory which should be protected
95785 from userspace allocation. Keeping a user from writing to low pages
95786diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95787index 67a33a5..094dcf1 100644
95788--- a/mm/backing-dev.c
95789+++ b/mm/backing-dev.c
95790@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
95791 list_add_tail_rcu(&wb->list, &bdi->wb_list);
95792 spin_unlock(&bdi->wb_lock);
95793
95794- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
95795+ tsk->flags |= PF_SWAPWRITE;
95796 set_freezable();
95797
95798 /*
95799@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
95800 * Add the default flusher task that gets created for any bdi
95801 * that has dirty data pending writeout
95802 */
95803-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95804+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95805 {
95806 if (!bdi_cap_writeback_dirty(bdi))
95807 return;
95808diff --git a/mm/filemap.c b/mm/filemap.c
95809index a1fe378..e26702f 100644
95810--- a/mm/filemap.c
95811+++ b/mm/filemap.c
95812@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95813 struct address_space *mapping = file->f_mapping;
95814
95815 if (!mapping->a_ops->readpage)
95816- return -ENOEXEC;
95817+ return -ENODEV;
95818 file_accessed(file);
95819 vma->vm_ops = &generic_file_vm_ops;
95820 vma->vm_flags |= VM_CAN_NONLINEAR;
95821@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95822 *pos = i_size_read(inode);
95823
95824 if (limit != RLIM_INFINITY) {
95825+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95826 if (*pos >= limit) {
95827 send_sig(SIGXFSZ, current, 0);
95828 return -EFBIG;
95829diff --git a/mm/fremap.c b/mm/fremap.c
95830index b6ec85a..a24ac22 100644
95831--- a/mm/fremap.c
95832+++ b/mm/fremap.c
95833@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95834 retry:
95835 vma = find_vma(mm, start);
95836
95837+#ifdef CONFIG_PAX_SEGMEXEC
95838+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95839+ goto out;
95840+#endif
95841+
95842 /*
95843 * Make sure the vma is shared, that it supports prefaulting,
95844 * and that the remapped range is valid and fully within
95845@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95846 /*
95847 * drop PG_Mlocked flag for over-mapped range
95848 */
95849- unsigned int saved_flags = vma->vm_flags;
95850+ unsigned long saved_flags = vma->vm_flags;
95851 munlock_vma_pages_range(vma, start, start + size);
95852 vma->vm_flags = saved_flags;
95853 }
95854diff --git a/mm/highmem.c b/mm/highmem.c
95855index 9c1e627..5ca9447 100644
95856--- a/mm/highmem.c
95857+++ b/mm/highmem.c
95858@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
95859 * So no dangers, even with speculative execution.
95860 */
95861 page = pte_page(pkmap_page_table[i]);
95862+ pax_open_kernel();
95863 pte_clear(&init_mm, (unsigned long)page_address(page),
95864 &pkmap_page_table[i]);
95865-
95866+ pax_close_kernel();
95867 set_page_address(page, NULL);
95868 need_flush = 1;
95869 }
95870@@ -177,9 +178,11 @@ start:
95871 }
95872 }
95873 vaddr = PKMAP_ADDR(last_pkmap_nr);
95874+
95875+ pax_open_kernel();
95876 set_pte_at(&init_mm, vaddr,
95877 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95878-
95879+ pax_close_kernel();
95880 pkmap_count[last_pkmap_nr] = 1;
95881 set_page_address(page, (void *)vaddr);
95882
95883diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95884index 5e1e508..ac70275 100644
95885--- a/mm/hugetlb.c
95886+++ b/mm/hugetlb.c
95887@@ -869,6 +869,7 @@ free:
95888 list_del(&page->lru);
95889 enqueue_huge_page(h, page);
95890 }
95891+ spin_unlock(&hugetlb_lock);
95892
95893 /* Free unnecessary surplus pages to the buddy allocator */
95894 if (!list_empty(&surplus_list)) {
95895@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95896 return 1;
95897 }
95898
95899+#ifdef CONFIG_PAX_SEGMEXEC
95900+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95901+{
95902+ struct mm_struct *mm = vma->vm_mm;
95903+ struct vm_area_struct *vma_m;
95904+ unsigned long address_m;
95905+ pte_t *ptep_m;
95906+
95907+ vma_m = pax_find_mirror_vma(vma);
95908+ if (!vma_m)
95909+ return;
95910+
95911+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95912+ address_m = address + SEGMEXEC_TASK_SIZE;
95913+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95914+ get_page(page_m);
95915+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95916+}
95917+#endif
95918+
95919 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
95920 unsigned long address, pte_t *ptep, pte_t pte,
95921 struct page *pagecache_page)
95922@@ -2004,6 +2025,11 @@ retry_avoidcopy:
95923 huge_ptep_clear_flush(vma, address, ptep);
95924 set_huge_pte_at(mm, address, ptep,
95925 make_huge_pte(vma, new_page, 1));
95926+
95927+#ifdef CONFIG_PAX_SEGMEXEC
95928+ pax_mirror_huge_pte(vma, address, new_page);
95929+#endif
95930+
95931 /* Make the old page be freed below */
95932 new_page = old_page;
95933 }
95934@@ -2135,6 +2161,10 @@ retry:
95935 && (vma->vm_flags & VM_SHARED)));
95936 set_huge_pte_at(mm, address, ptep, new_pte);
95937
95938+#ifdef CONFIG_PAX_SEGMEXEC
95939+ pax_mirror_huge_pte(vma, address, page);
95940+#endif
95941+
95942 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95943 /* Optimization, do the COW without a second fault */
95944 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
95945@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95946 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
95947 struct hstate *h = hstate_vma(vma);
95948
95949+#ifdef CONFIG_PAX_SEGMEXEC
95950+ struct vm_area_struct *vma_m;
95951+
95952+ vma_m = pax_find_mirror_vma(vma);
95953+ if (vma_m) {
95954+ unsigned long address_m;
95955+
95956+ if (vma->vm_start > vma_m->vm_start) {
95957+ address_m = address;
95958+ address -= SEGMEXEC_TASK_SIZE;
95959+ vma = vma_m;
95960+ h = hstate_vma(vma);
95961+ } else
95962+ address_m = address + SEGMEXEC_TASK_SIZE;
95963+
95964+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95965+ return VM_FAULT_OOM;
95966+ address_m &= HPAGE_MASK;
95967+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95968+ }
95969+#endif
95970+
95971 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95972 if (!ptep)
95973 return VM_FAULT_OOM;
95974diff --git a/mm/internal.h b/mm/internal.h
95975index f03e8e2..7354343 100644
95976--- a/mm/internal.h
95977+++ b/mm/internal.h
95978@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
95979 * in mm/page_alloc.c
95980 */
95981 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95982+extern void free_compound_page(struct page *page);
95983 extern void prep_compound_page(struct page *page, unsigned long order);
95984
95985
95986diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95987index c346660..b47382f 100644
95988--- a/mm/kmemleak.c
95989+++ b/mm/kmemleak.c
95990@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
95991
95992 for (i = 0; i < object->trace_len; i++) {
95993 void *ptr = (void *)object->trace[i];
95994- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95995+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
95996 }
95997 }
95998
95999diff --git a/mm/maccess.c b/mm/maccess.c
96000index 9073695..1127f348 100644
96001--- a/mm/maccess.c
96002+++ b/mm/maccess.c
96003@@ -14,7 +14,7 @@
96004 * Safely read from address @src to the buffer at @dst. If a kernel fault
96005 * happens, handle that and return -EFAULT.
96006 */
96007-long probe_kernel_read(void *dst, void *src, size_t size)
96008+long probe_kernel_read(void *dst, const void *src, size_t size)
96009 {
96010 long ret;
96011 mm_segment_t old_fs = get_fs();
96012@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
96013 set_fs(KERNEL_DS);
96014 pagefault_disable();
96015 ret = __copy_from_user_inatomic(dst,
96016- (__force const void __user *)src, size);
96017+ (const void __force_user *)src, size);
96018 pagefault_enable();
96019 set_fs(old_fs);
96020
96021@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
96022 * Safely write to address @dst from the buffer at @src. If a kernel fault
96023 * happens, handle that and return -EFAULT.
96024 */
96025-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
96026+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
96027 {
96028 long ret;
96029 mm_segment_t old_fs = get_fs();
96030
96031 set_fs(KERNEL_DS);
96032 pagefault_disable();
96033- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
96034+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
96035 pagefault_enable();
96036 set_fs(old_fs);
96037
96038diff --git a/mm/madvise.c b/mm/madvise.c
96039index 35b1479..499f7d4 100644
96040--- a/mm/madvise.c
96041+++ b/mm/madvise.c
96042@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
96043 pgoff_t pgoff;
96044 unsigned long new_flags = vma->vm_flags;
96045
96046+#ifdef CONFIG_PAX_SEGMEXEC
96047+ struct vm_area_struct *vma_m;
96048+#endif
96049+
96050 switch (behavior) {
96051 case MADV_NORMAL:
96052 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
96053@@ -103,6 +107,13 @@ success:
96054 /*
96055 * vm_flags is protected by the mmap_sem held in write mode.
96056 */
96057+
96058+#ifdef CONFIG_PAX_SEGMEXEC
96059+ vma_m = pax_find_mirror_vma(vma);
96060+ if (vma_m)
96061+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
96062+#endif
96063+
96064 vma->vm_flags = new_flags;
96065
96066 out:
96067@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
96068 struct vm_area_struct ** prev,
96069 unsigned long start, unsigned long end)
96070 {
96071+
96072+#ifdef CONFIG_PAX_SEGMEXEC
96073+ struct vm_area_struct *vma_m;
96074+#endif
96075+
96076 *prev = vma;
96077 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96078 return -EINVAL;
96079@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
96080 zap_page_range(vma, start, end - start, &details);
96081 } else
96082 zap_page_range(vma, start, end - start, NULL);
96083+
96084+#ifdef CONFIG_PAX_SEGMEXEC
96085+ vma_m = pax_find_mirror_vma(vma);
96086+ if (vma_m) {
96087+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
96088+ struct zap_details details = {
96089+ .nonlinear_vma = vma_m,
96090+ .last_index = ULONG_MAX,
96091+ };
96092+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
96093+ } else
96094+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
96095+ }
96096+#endif
96097+
96098 return 0;
96099 }
96100
96101@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
96102 if (end < start)
96103 goto out;
96104
96105+#ifdef CONFIG_PAX_SEGMEXEC
96106+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
96107+ if (end > SEGMEXEC_TASK_SIZE)
96108+ goto out;
96109+ } else
96110+#endif
96111+
96112+ if (end > TASK_SIZE)
96113+ goto out;
96114+
96115 error = 0;
96116 if (end == start)
96117 goto out;
96118diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96119index 8aeba53..b4a4198 100644
96120--- a/mm/memory-failure.c
96121+++ b/mm/memory-failure.c
96122@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96123
96124 int sysctl_memory_failure_recovery __read_mostly = 1;
96125
96126-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
96127+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
96128
96129 /*
96130 * Send all the processes who have the page mapped an ``action optional''
96131@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
96132 si.si_signo = SIGBUS;
96133 si.si_errno = 0;
96134 si.si_code = BUS_MCEERR_AO;
96135- si.si_addr = (void *)addr;
96136+ si.si_addr = (void __user *)addr;
96137 #ifdef __ARCH_SI_TRAPNO
96138 si.si_trapno = trapno;
96139 #endif
96140@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
96141 return 0;
96142 }
96143
96144- atomic_long_add(1, &mce_bad_pages);
96145+ atomic_long_add_unchecked(1, &mce_bad_pages);
96146
96147 /*
96148 * We need/can do nothing about count=0 pages.
96149diff --git a/mm/memory.c b/mm/memory.c
96150index 6c836d3..48f3264 100644
96151--- a/mm/memory.c
96152+++ b/mm/memory.c
96153@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96154 return;
96155
96156 pmd = pmd_offset(pud, start);
96157+
96158+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96159 pud_clear(pud);
96160 pmd_free_tlb(tlb, pmd, start);
96161+#endif
96162+
96163 }
96164
96165 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96166@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96167 if (end - 1 > ceiling - 1)
96168 return;
96169
96170+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96171 pud = pud_offset(pgd, start);
96172 pgd_clear(pgd);
96173 pud_free_tlb(tlb, pud, start);
96174+#endif
96175+
96176 }
96177
96178 /*
96179@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96180 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
96181 i = 0;
96182
96183- do {
96184+ while (nr_pages) {
96185 struct vm_area_struct *vma;
96186
96187- vma = find_extend_vma(mm, start);
96188+ vma = find_vma(mm, start);
96189 if (!vma && in_gate_area(tsk, start)) {
96190 unsigned long pg = start & PAGE_MASK;
96191 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
96192@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96193 continue;
96194 }
96195
96196- if (!vma ||
96197+ if (!vma || start < vma->vm_start ||
96198 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
96199 !(vm_flags & vma->vm_flags))
96200 return i ? : -EFAULT;
96201@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96202 start += PAGE_SIZE;
96203 nr_pages--;
96204 } while (nr_pages && start < vma->vm_end);
96205- } while (nr_pages);
96206+ }
96207 return i;
96208 }
96209
96210@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96211 page_add_file_rmap(page);
96212 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96213
96214+#ifdef CONFIG_PAX_SEGMEXEC
96215+ pax_mirror_file_pte(vma, addr, page, ptl);
96216+#endif
96217+
96218 retval = 0;
96219 pte_unmap_unlock(pte, ptl);
96220 return retval;
96221@@ -1560,10 +1571,22 @@ out:
96222 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96223 struct page *page)
96224 {
96225+
96226+#ifdef CONFIG_PAX_SEGMEXEC
96227+ struct vm_area_struct *vma_m;
96228+#endif
96229+
96230 if (addr < vma->vm_start || addr >= vma->vm_end)
96231 return -EFAULT;
96232 if (!page_count(page))
96233 return -EINVAL;
96234+
96235+#ifdef CONFIG_PAX_SEGMEXEC
96236+ vma_m = pax_find_mirror_vma(vma);
96237+ if (vma_m)
96238+ vma_m->vm_flags |= VM_INSERTPAGE;
96239+#endif
96240+
96241 vma->vm_flags |= VM_INSERTPAGE;
96242 return insert_page(vma, addr, page, vma->vm_page_prot);
96243 }
96244@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96245 unsigned long pfn)
96246 {
96247 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96248+ BUG_ON(vma->vm_mirror);
96249
96250 if (addr < vma->vm_start || addr >= vma->vm_end)
96251 return -EFAULT;
96252@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
96253 copy_user_highpage(dst, src, va, vma);
96254 }
96255
96256+#ifdef CONFIG_PAX_SEGMEXEC
96257+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96258+{
96259+ struct mm_struct *mm = vma->vm_mm;
96260+ spinlock_t *ptl;
96261+ pte_t *pte, entry;
96262+
96263+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96264+ entry = *pte;
96265+ if (!pte_present(entry)) {
96266+ if (!pte_none(entry)) {
96267+ BUG_ON(pte_file(entry));
96268+ free_swap_and_cache(pte_to_swp_entry(entry));
96269+ pte_clear_not_present_full(mm, address, pte, 0);
96270+ }
96271+ } else {
96272+ struct page *page;
96273+
96274+ flush_cache_page(vma, address, pte_pfn(entry));
96275+ entry = ptep_clear_flush(vma, address, pte);
96276+ BUG_ON(pte_dirty(entry));
96277+ page = vm_normal_page(vma, address, entry);
96278+ if (page) {
96279+ update_hiwater_rss(mm);
96280+ if (PageAnon(page))
96281+ dec_mm_counter(mm, anon_rss);
96282+ else
96283+ dec_mm_counter(mm, file_rss);
96284+ page_remove_rmap(page);
96285+ page_cache_release(page);
96286+ }
96287+ }
96288+ pte_unmap_unlock(pte, ptl);
96289+}
96290+
96291+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96292+ *
96293+ * the ptl of the lower mapped page is held on entry and is not released on exit
96294+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96295+ */
96296+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96297+{
96298+ struct mm_struct *mm = vma->vm_mm;
96299+ unsigned long address_m;
96300+ spinlock_t *ptl_m;
96301+ struct vm_area_struct *vma_m;
96302+ pmd_t *pmd_m;
96303+ pte_t *pte_m, entry_m;
96304+
96305+ BUG_ON(!page_m || !PageAnon(page_m));
96306+
96307+ vma_m = pax_find_mirror_vma(vma);
96308+ if (!vma_m)
96309+ return;
96310+
96311+ BUG_ON(!PageLocked(page_m));
96312+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96313+ address_m = address + SEGMEXEC_TASK_SIZE;
96314+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96315+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96316+ ptl_m = pte_lockptr(mm, pmd_m);
96317+ if (ptl != ptl_m) {
96318+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96319+ if (!pte_none(*pte_m))
96320+ goto out;
96321+ }
96322+
96323+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96324+ page_cache_get(page_m);
96325+ page_add_anon_rmap(page_m, vma_m, address_m);
96326+ inc_mm_counter(mm, anon_rss);
96327+ set_pte_at(mm, address_m, pte_m, entry_m);
96328+ update_mmu_cache(vma_m, address_m, entry_m);
96329+out:
96330+ if (ptl != ptl_m)
96331+ spin_unlock(ptl_m);
96332+ pte_unmap_nested(pte_m);
96333+ unlock_page(page_m);
96334+}
96335+
96336+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96337+{
96338+ struct mm_struct *mm = vma->vm_mm;
96339+ unsigned long address_m;
96340+ spinlock_t *ptl_m;
96341+ struct vm_area_struct *vma_m;
96342+ pmd_t *pmd_m;
96343+ pte_t *pte_m, entry_m;
96344+
96345+ BUG_ON(!page_m || PageAnon(page_m));
96346+
96347+ vma_m = pax_find_mirror_vma(vma);
96348+ if (!vma_m)
96349+ return;
96350+
96351+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96352+ address_m = address + SEGMEXEC_TASK_SIZE;
96353+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96354+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96355+ ptl_m = pte_lockptr(mm, pmd_m);
96356+ if (ptl != ptl_m) {
96357+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96358+ if (!pte_none(*pte_m))
96359+ goto out;
96360+ }
96361+
96362+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96363+ page_cache_get(page_m);
96364+ page_add_file_rmap(page_m);
96365+ inc_mm_counter(mm, file_rss);
96366+ set_pte_at(mm, address_m, pte_m, entry_m);
96367+ update_mmu_cache(vma_m, address_m, entry_m);
96368+out:
96369+ if (ptl != ptl_m)
96370+ spin_unlock(ptl_m);
96371+ pte_unmap_nested(pte_m);
96372+}
96373+
96374+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96375+{
96376+ struct mm_struct *mm = vma->vm_mm;
96377+ unsigned long address_m;
96378+ spinlock_t *ptl_m;
96379+ struct vm_area_struct *vma_m;
96380+ pmd_t *pmd_m;
96381+ pte_t *pte_m, entry_m;
96382+
96383+ vma_m = pax_find_mirror_vma(vma);
96384+ if (!vma_m)
96385+ return;
96386+
96387+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96388+ address_m = address + SEGMEXEC_TASK_SIZE;
96389+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96390+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96391+ ptl_m = pte_lockptr(mm, pmd_m);
96392+ if (ptl != ptl_m) {
96393+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96394+ if (!pte_none(*pte_m))
96395+ goto out;
96396+ }
96397+
96398+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96399+ set_pte_at(mm, address_m, pte_m, entry_m);
96400+out:
96401+ if (ptl != ptl_m)
96402+ spin_unlock(ptl_m);
96403+ pte_unmap_nested(pte_m);
96404+}
96405+
96406+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96407+{
96408+ struct page *page_m;
96409+ pte_t entry;
96410+
96411+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96412+ goto out;
96413+
96414+ entry = *pte;
96415+ page_m = vm_normal_page(vma, address, entry);
96416+ if (!page_m)
96417+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96418+ else if (PageAnon(page_m)) {
96419+ if (pax_find_mirror_vma(vma)) {
96420+ pte_unmap_unlock(pte, ptl);
96421+ lock_page(page_m);
96422+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96423+ if (pte_same(entry, *pte))
96424+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96425+ else
96426+ unlock_page(page_m);
96427+ }
96428+ } else
96429+ pax_mirror_file_pte(vma, address, page_m, ptl);
96430+
96431+out:
96432+ pte_unmap_unlock(pte, ptl);
96433+}
96434+#endif
96435+
96436 /*
96437 * This routine handles present pages, when users try to write
96438 * to a shared page. It is done by copying the page to a new address
96439@@ -2156,6 +2360,12 @@ gotten:
96440 */
96441 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96442 if (likely(pte_same(*page_table, orig_pte))) {
96443+
96444+#ifdef CONFIG_PAX_SEGMEXEC
96445+ if (pax_find_mirror_vma(vma))
96446+ BUG_ON(!trylock_page(new_page));
96447+#endif
96448+
96449 if (old_page) {
96450 if (!PageAnon(old_page)) {
96451 dec_mm_counter(mm, file_rss);
96452@@ -2207,6 +2417,10 @@ gotten:
96453 page_remove_rmap(old_page);
96454 }
96455
96456+#ifdef CONFIG_PAX_SEGMEXEC
96457+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96458+#endif
96459+
96460 /* Free the old page.. */
96461 new_page = old_page;
96462 ret |= VM_FAULT_WRITE;
96463@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96464 swap_free(entry);
96465 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96466 try_to_free_swap(page);
96467+
96468+#ifdef CONFIG_PAX_SEGMEXEC
96469+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96470+#endif
96471+
96472 unlock_page(page);
96473
96474 if (flags & FAULT_FLAG_WRITE) {
96475@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96476
96477 /* No need to invalidate - it was non-present before */
96478 update_mmu_cache(vma, address, pte);
96479+
96480+#ifdef CONFIG_PAX_SEGMEXEC
96481+ pax_mirror_anon_pte(vma, address, page, ptl);
96482+#endif
96483+
96484 unlock:
96485 pte_unmap_unlock(page_table, ptl);
96486 out:
96487@@ -2632,40 +2856,6 @@ out_release:
96488 }
96489
96490 /*
96491- * This is like a special single-page "expand_{down|up}wards()",
96492- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96493- * doesn't hit another vma.
96494- */
96495-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96496-{
96497- address &= PAGE_MASK;
96498- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96499- struct vm_area_struct *prev = vma->vm_prev;
96500-
96501- /*
96502- * Is there a mapping abutting this one below?
96503- *
96504- * That's only ok if it's the same stack mapping
96505- * that has gotten split..
96506- */
96507- if (prev && prev->vm_end == address)
96508- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96509-
96510- expand_stack(vma, address - PAGE_SIZE);
96511- }
96512- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96513- struct vm_area_struct *next = vma->vm_next;
96514-
96515- /* As VM_GROWSDOWN but s/below/above/ */
96516- if (next && next->vm_start == address + PAGE_SIZE)
96517- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96518-
96519- expand_upwards(vma, address + PAGE_SIZE);
96520- }
96521- return 0;
96522-}
96523-
96524-/*
96525 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96526 * but allow concurrent faults), and pte mapped but not yet locked.
96527 * We return with mmap_sem still held, but pte unmapped and unlocked.
96528@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96529 unsigned long address, pte_t *page_table, pmd_t *pmd,
96530 unsigned int flags)
96531 {
96532- struct page *page;
96533+ struct page *page = NULL;
96534 spinlock_t *ptl;
96535 pte_t entry;
96536
96537- pte_unmap(page_table);
96538-
96539- /* Check if we need to add a guard page to the stack */
96540- if (check_stack_guard_page(vma, address) < 0)
96541- return VM_FAULT_SIGBUS;
96542-
96543- /* Use the zero-page for reads */
96544 if (!(flags & FAULT_FLAG_WRITE)) {
96545 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96546 vma->vm_page_prot));
96547- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96548+ ptl = pte_lockptr(mm, pmd);
96549+ spin_lock(ptl);
96550 if (!pte_none(*page_table))
96551 goto unlock;
96552 goto setpte;
96553 }
96554
96555 /* Allocate our own private page. */
96556+ pte_unmap(page_table);
96557+
96558 if (unlikely(anon_vma_prepare(vma)))
96559 goto oom;
96560 page = alloc_zeroed_user_highpage_movable(vma, address);
96561@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96562 if (!pte_none(*page_table))
96563 goto release;
96564
96565+#ifdef CONFIG_PAX_SEGMEXEC
96566+ if (pax_find_mirror_vma(vma))
96567+ BUG_ON(!trylock_page(page));
96568+#endif
96569+
96570 inc_mm_counter(mm, anon_rss);
96571 page_add_new_anon_rmap(page, vma, address);
96572 setpte:
96573@@ -2720,6 +2911,12 @@ setpte:
96574
96575 /* No need to invalidate - it was non-present before */
96576 update_mmu_cache(vma, address, entry);
96577+
96578+#ifdef CONFIG_PAX_SEGMEXEC
96579+ if (page)
96580+ pax_mirror_anon_pte(vma, address, page, ptl);
96581+#endif
96582+
96583 unlock:
96584 pte_unmap_unlock(page_table, ptl);
96585 return 0;
96586@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96587 */
96588 /* Only go through if we didn't race with anybody else... */
96589 if (likely(pte_same(*page_table, orig_pte))) {
96590+
96591+#ifdef CONFIG_PAX_SEGMEXEC
96592+ if (anon && pax_find_mirror_vma(vma))
96593+ BUG_ON(!trylock_page(page));
96594+#endif
96595+
96596 flush_icache_page(vma, page);
96597 entry = mk_pte(page, vma->vm_page_prot);
96598 if (flags & FAULT_FLAG_WRITE)
96599@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96600
96601 /* no need to invalidate: a not-present page won't be cached */
96602 update_mmu_cache(vma, address, entry);
96603+
96604+#ifdef CONFIG_PAX_SEGMEXEC
96605+ if (anon)
96606+ pax_mirror_anon_pte(vma, address, page, ptl);
96607+ else
96608+ pax_mirror_file_pte(vma, address, page, ptl);
96609+#endif
96610+
96611 } else {
96612 if (charged)
96613 mem_cgroup_uncharge_page(page);
96614@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
96615 if (flags & FAULT_FLAG_WRITE)
96616 flush_tlb_page(vma, address);
96617 }
96618+
96619+#ifdef CONFIG_PAX_SEGMEXEC
96620+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96621+ return 0;
96622+#endif
96623+
96624 unlock:
96625 pte_unmap_unlock(pte, ptl);
96626 return 0;
96627@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96628 pmd_t *pmd;
96629 pte_t *pte;
96630
96631+#ifdef CONFIG_PAX_SEGMEXEC
96632+ struct vm_area_struct *vma_m;
96633+#endif
96634+
96635 __set_current_state(TASK_RUNNING);
96636
96637 count_vm_event(PGFAULT);
96638@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96639 if (unlikely(is_vm_hugetlb_page(vma)))
96640 return hugetlb_fault(mm, vma, address, flags);
96641
96642+#ifdef CONFIG_PAX_SEGMEXEC
96643+ vma_m = pax_find_mirror_vma(vma);
96644+ if (vma_m) {
96645+ unsigned long address_m;
96646+ pgd_t *pgd_m;
96647+ pud_t *pud_m;
96648+ pmd_t *pmd_m;
96649+
96650+ if (vma->vm_start > vma_m->vm_start) {
96651+ address_m = address;
96652+ address -= SEGMEXEC_TASK_SIZE;
96653+ vma = vma_m;
96654+ } else
96655+ address_m = address + SEGMEXEC_TASK_SIZE;
96656+
96657+ pgd_m = pgd_offset(mm, address_m);
96658+ pud_m = pud_alloc(mm, pgd_m, address_m);
96659+ if (!pud_m)
96660+ return VM_FAULT_OOM;
96661+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96662+ if (!pmd_m)
96663+ return VM_FAULT_OOM;
96664+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
96665+ return VM_FAULT_OOM;
96666+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96667+ }
96668+#endif
96669+
96670 pgd = pgd_offset(mm, address);
96671 pud = pud_alloc(mm, pgd, address);
96672 if (!pud)
96673@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
96674 gate_vma.vm_start = FIXADDR_USER_START;
96675 gate_vma.vm_end = FIXADDR_USER_END;
96676 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
96677- gate_vma.vm_page_prot = __P101;
96678+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
96679 /*
96680 * Make sure the vDSO gets into every core dump.
96681 * Dumping its contents makes post-mortem fully interpretable later
96682diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96683index 3c6e3e2..b1ddbb8 100644
96684--- a/mm/mempolicy.c
96685+++ b/mm/mempolicy.c
96686@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96687 struct vm_area_struct *next;
96688 int err;
96689
96690+#ifdef CONFIG_PAX_SEGMEXEC
96691+ struct vm_area_struct *vma_m;
96692+#endif
96693+
96694 err = 0;
96695 for (; vma && vma->vm_start < end; vma = next) {
96696 next = vma->vm_next;
96697@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96698 err = policy_vma(vma, new);
96699 if (err)
96700 break;
96701+
96702+#ifdef CONFIG_PAX_SEGMEXEC
96703+ vma_m = pax_find_mirror_vma(vma);
96704+ if (vma_m) {
96705+ err = policy_vma(vma_m, new);
96706+ if (err)
96707+ break;
96708+ }
96709+#endif
96710+
96711 }
96712 return err;
96713 }
96714@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96715
96716 if (end < start)
96717 return -EINVAL;
96718+
96719+#ifdef CONFIG_PAX_SEGMEXEC
96720+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96721+ if (end > SEGMEXEC_TASK_SIZE)
96722+ return -EINVAL;
96723+ } else
96724+#endif
96725+
96726+ if (end > TASK_SIZE)
96727+ return -EINVAL;
96728+
96729 if (end == start)
96730 return 0;
96731
96732@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96733 if (!mm)
96734 return -EINVAL;
96735
96736+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96737+ if (mm != current->mm &&
96738+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96739+ err = -EPERM;
96740+ goto out;
96741+ }
96742+#endif
96743+
96744 /*
96745 * Check if this process has the right to modify the specified
96746 * process. The right exists if the process has administrative
96747@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96748 rcu_read_lock();
96749 tcred = __task_cred(task);
96750 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96751- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96752- !capable(CAP_SYS_NICE)) {
96753+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96754 rcu_read_unlock();
96755 err = -EPERM;
96756 goto out;
96757@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
96758 }
96759 #endif
96760
96761+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96762+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
96763+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
96764+ _mm->pax_flags & MF_PAX_SEGMEXEC))
96765+#endif
96766+
96767 /*
96768 * Display pages allocated per node and memory policy via /proc.
96769 */
96770@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
96771 int n;
96772 char buffer[50];
96773
96774+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96775+ if (current->exec_id != m->exec_id) {
96776+ gr_log_badprocpid("numa_maps");
96777+ return 0;
96778+ }
96779+#endif
96780+
96781 if (!mm)
96782 return 0;
96783
96784@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
96785 mpol_to_str(buffer, sizeof(buffer), pol, 0);
96786 mpol_cond_put(pol);
96787
96788+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96789+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
96790+#else
96791 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
96792+#endif
96793
96794 if (file) {
96795 seq_printf(m, " file=");
96796- seq_path(m, &file->f_path, "\n\t= ");
96797+ seq_path(m, &file->f_path, "\n\t\\= ");
96798 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
96799 seq_printf(m, " heap");
96800 } else if (vma->vm_start <= mm->start_stack &&
96801diff --git a/mm/migrate.c b/mm/migrate.c
96802index aaca868..2ebecdc 100644
96803--- a/mm/migrate.c
96804+++ b/mm/migrate.c
96805@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
96806 unsigned long chunk_start;
96807 int err;
96808
96809+ pax_track_stack();
96810+
96811 task_nodes = cpuset_mems_allowed(task);
96812
96813 err = -ENOMEM;
96814@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96815 if (!mm)
96816 return -EINVAL;
96817
96818+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96819+ if (mm != current->mm &&
96820+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96821+ err = -EPERM;
96822+ goto out;
96823+ }
96824+#endif
96825+
96826 /*
96827 * Check if this process has the right to modify the specified
96828 * process. The right exists if the process has administrative
96829@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96830 rcu_read_lock();
96831 tcred = __task_cred(task);
96832 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96833- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96834- !capable(CAP_SYS_NICE)) {
96835+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96836 rcu_read_unlock();
96837 err = -EPERM;
96838 goto out;
96839diff --git a/mm/mlock.c b/mm/mlock.c
96840index 2d846cf..98134d2 100644
96841--- a/mm/mlock.c
96842+++ b/mm/mlock.c
96843@@ -13,6 +13,7 @@
96844 #include <linux/pagemap.h>
96845 #include <linux/mempolicy.h>
96846 #include <linux/syscalls.h>
96847+#include <linux/security.h>
96848 #include <linux/sched.h>
96849 #include <linux/module.h>
96850 #include <linux/rmap.h>
96851@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
96852 }
96853 }
96854
96855-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
96856-{
96857- return (vma->vm_flags & VM_GROWSDOWN) &&
96858- (vma->vm_start == addr) &&
96859- !vma_stack_continue(vma->vm_prev, addr);
96860-}
96861-
96862 /**
96863 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
96864 * @vma: target vma
96865@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
96866 if (vma->vm_flags & VM_WRITE)
96867 gup_flags |= FOLL_WRITE;
96868
96869- /* We don't try to access the guard page of a stack vma */
96870- if (stack_guard_page(vma, start)) {
96871- addr += PAGE_SIZE;
96872- nr_pages--;
96873- }
96874-
96875 while (nr_pages > 0) {
96876 int i;
96877
96878@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96879 {
96880 unsigned long nstart, end, tmp;
96881 struct vm_area_struct * vma, * prev;
96882- int error;
96883+ int error = -EINVAL;
96884
96885 len = PAGE_ALIGN(len);
96886 end = start + len;
96887@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96888 return -EINVAL;
96889 if (end == start)
96890 return 0;
96891+ if (end > TASK_SIZE)
96892+ return -EINVAL;
96893+
96894 vma = find_vma_prev(current->mm, start, &prev);
96895 if (!vma || vma->vm_start > start)
96896 return -ENOMEM;
96897@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96898 for (nstart = start ; ; ) {
96899 unsigned int newflags;
96900
96901+#ifdef CONFIG_PAX_SEGMEXEC
96902+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96903+ break;
96904+#endif
96905+
96906 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96907
96908 newflags = vma->vm_flags | VM_LOCKED;
96909@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96910 lock_limit >>= PAGE_SHIFT;
96911
96912 /* check against resource limits */
96913+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96914 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96915 error = do_mlock(start, len, 1);
96916 up_write(&current->mm->mmap_sem);
96917@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
96918 static int do_mlockall(int flags)
96919 {
96920 struct vm_area_struct * vma, * prev = NULL;
96921- unsigned int def_flags = 0;
96922
96923 if (flags & MCL_FUTURE)
96924- def_flags = VM_LOCKED;
96925- current->mm->def_flags = def_flags;
96926+ current->mm->def_flags |= VM_LOCKED;
96927+ else
96928+ current->mm->def_flags &= ~VM_LOCKED;
96929 if (flags == MCL_FUTURE)
96930 goto out;
96931
96932 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96933- unsigned int newflags;
96934+ unsigned long newflags;
96935
96936+#ifdef CONFIG_PAX_SEGMEXEC
96937+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96938+ break;
96939+#endif
96940+
96941+ BUG_ON(vma->vm_end > TASK_SIZE);
96942 newflags = vma->vm_flags | VM_LOCKED;
96943 if (!(flags & MCL_CURRENT))
96944 newflags &= ~VM_LOCKED;
96945@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96946 lock_limit >>= PAGE_SHIFT;
96947
96948 ret = -ENOMEM;
96949+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96950 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96951 capable(CAP_IPC_LOCK))
96952 ret = do_mlockall(flags);
96953diff --git a/mm/mmap.c b/mm/mmap.c
96954index 4b80cbf..12a7861 100644
96955--- a/mm/mmap.c
96956+++ b/mm/mmap.c
96957@@ -45,6 +45,16 @@
96958 #define arch_rebalance_pgtables(addr, len) (addr)
96959 #endif
96960
96961+static inline void verify_mm_writelocked(struct mm_struct *mm)
96962+{
96963+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96964+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96965+ up_read(&mm->mmap_sem);
96966+ BUG();
96967+ }
96968+#endif
96969+}
96970+
96971 static void unmap_region(struct mm_struct *mm,
96972 struct vm_area_struct *vma, struct vm_area_struct *prev,
96973 unsigned long start, unsigned long end);
96974@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
96975 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96976 *
96977 */
96978-pgprot_t protection_map[16] = {
96979+pgprot_t protection_map[16] __read_only = {
96980 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96981 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96982 };
96983
96984 pgprot_t vm_get_page_prot(unsigned long vm_flags)
96985 {
96986- return __pgprot(pgprot_val(protection_map[vm_flags &
96987+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96988 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96989 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96990+
96991+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96992+ if (!nx_enabled &&
96993+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96994+ (vm_flags & (VM_READ | VM_WRITE)))
96995+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96996+#endif
96997+
96998+ return prot;
96999 }
97000 EXPORT_SYMBOL(vm_get_page_prot);
97001
97002 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
97003 int sysctl_overcommit_ratio = 50; /* default is 50% */
97004 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97005+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97006 struct percpu_counter vm_committed_as;
97007
97008 /*
97009@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97010 struct vm_area_struct *next = vma->vm_next;
97011
97012 might_sleep();
97013+ BUG_ON(vma->vm_mirror);
97014 if (vma->vm_ops && vma->vm_ops->close)
97015 vma->vm_ops->close(vma);
97016 if (vma->vm_file) {
97017@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97018 * not page aligned -Ram Gupta
97019 */
97020 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
97021+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
97022 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
97023 (mm->end_data - mm->start_data) > rlim)
97024 goto out;
97025@@ -704,6 +726,12 @@ static int
97026 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97027 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97028 {
97029+
97030+#ifdef CONFIG_PAX_SEGMEXEC
97031+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97032+ return 0;
97033+#endif
97034+
97035 if (is_mergeable_vma(vma, file, vm_flags) &&
97036 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
97037 if (vma->vm_pgoff == vm_pgoff)
97038@@ -723,6 +751,12 @@ static int
97039 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97040 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97041 {
97042+
97043+#ifdef CONFIG_PAX_SEGMEXEC
97044+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97045+ return 0;
97046+#endif
97047+
97048 if (is_mergeable_vma(vma, file, vm_flags) &&
97049 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
97050 pgoff_t vm_pglen;
97051@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97052 struct vm_area_struct *vma_merge(struct mm_struct *mm,
97053 struct vm_area_struct *prev, unsigned long addr,
97054 unsigned long end, unsigned long vm_flags,
97055- struct anon_vma *anon_vma, struct file *file,
97056+ struct anon_vma *anon_vma, struct file *file,
97057 pgoff_t pgoff, struct mempolicy *policy)
97058 {
97059 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
97060 struct vm_area_struct *area, *next;
97061
97062+#ifdef CONFIG_PAX_SEGMEXEC
97063+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97064+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97065+
97066+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97067+#endif
97068+
97069 /*
97070 * We later require that vma->vm_flags == vm_flags,
97071 * so this tests vma->vm_flags & VM_SPECIAL, too.
97072@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97073 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97074 next = next->vm_next;
97075
97076+#ifdef CONFIG_PAX_SEGMEXEC
97077+ if (prev)
97078+ prev_m = pax_find_mirror_vma(prev);
97079+ if (area)
97080+ area_m = pax_find_mirror_vma(area);
97081+ if (next)
97082+ next_m = pax_find_mirror_vma(next);
97083+#endif
97084+
97085 /*
97086 * Can it merge with the predecessor?
97087 */
97088@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97089 /* cases 1, 6 */
97090 vma_adjust(prev, prev->vm_start,
97091 next->vm_end, prev->vm_pgoff, NULL);
97092- } else /* cases 2, 5, 7 */
97093+
97094+#ifdef CONFIG_PAX_SEGMEXEC
97095+ if (prev_m)
97096+ vma_adjust(prev_m, prev_m->vm_start,
97097+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97098+#endif
97099+
97100+ } else { /* cases 2, 5, 7 */
97101 vma_adjust(prev, prev->vm_start,
97102 end, prev->vm_pgoff, NULL);
97103+
97104+#ifdef CONFIG_PAX_SEGMEXEC
97105+ if (prev_m)
97106+ vma_adjust(prev_m, prev_m->vm_start,
97107+ end_m, prev_m->vm_pgoff, NULL);
97108+#endif
97109+
97110+ }
97111 return prev;
97112 }
97113
97114@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97115 mpol_equal(policy, vma_policy(next)) &&
97116 can_vma_merge_before(next, vm_flags,
97117 anon_vma, file, pgoff+pglen)) {
97118- if (prev && addr < prev->vm_end) /* case 4 */
97119+ if (prev && addr < prev->vm_end) { /* case 4 */
97120 vma_adjust(prev, prev->vm_start,
97121 addr, prev->vm_pgoff, NULL);
97122- else /* cases 3, 8 */
97123+
97124+#ifdef CONFIG_PAX_SEGMEXEC
97125+ if (prev_m)
97126+ vma_adjust(prev_m, prev_m->vm_start,
97127+ addr_m, prev_m->vm_pgoff, NULL);
97128+#endif
97129+
97130+ } else { /* cases 3, 8 */
97131 vma_adjust(area, addr, next->vm_end,
97132 next->vm_pgoff - pglen, NULL);
97133+
97134+#ifdef CONFIG_PAX_SEGMEXEC
97135+ if (area_m)
97136+ vma_adjust(area_m, addr_m, next_m->vm_end,
97137+ next_m->vm_pgoff - pglen, NULL);
97138+#endif
97139+
97140+ }
97141 return area;
97142 }
97143
97144@@ -898,14 +978,11 @@ none:
97145 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97146 struct file *file, long pages)
97147 {
97148- const unsigned long stack_flags
97149- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97150-
97151 if (file) {
97152 mm->shared_vm += pages;
97153 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97154 mm->exec_vm += pages;
97155- } else if (flags & stack_flags)
97156+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97157 mm->stack_vm += pages;
97158 if (flags & (VM_RESERVED|VM_IO))
97159 mm->reserved_vm += pages;
97160@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97161 * (the exception is when the underlying filesystem is noexec
97162 * mounted, in which case we dont add PROT_EXEC.)
97163 */
97164- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97165+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97166 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97167 prot |= PROT_EXEC;
97168
97169@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97170 /* Obtain the address to map to. we verify (or select) it and ensure
97171 * that it represents a valid section of the address space.
97172 */
97173- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97174+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97175 if (addr & ~PAGE_MASK)
97176 return addr;
97177
97178@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97179 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97180 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97181
97182+#ifdef CONFIG_PAX_MPROTECT
97183+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97184+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97185+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97186+ gr_log_rwxmmap(file);
97187+
97188+#ifdef CONFIG_PAX_EMUPLT
97189+ vm_flags &= ~VM_EXEC;
97190+#else
97191+ return -EPERM;
97192+#endif
97193+
97194+ }
97195+
97196+ if (!(vm_flags & VM_EXEC))
97197+ vm_flags &= ~VM_MAYEXEC;
97198+#else
97199+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97200+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97201+#endif
97202+ else
97203+ vm_flags &= ~VM_MAYWRITE;
97204+ }
97205+#endif
97206+
97207+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97208+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97209+ vm_flags &= ~VM_PAGEEXEC;
97210+#endif
97211+
97212 if (flags & MAP_LOCKED)
97213 if (!can_do_mlock())
97214 return -EPERM;
97215@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97216 locked += mm->locked_vm;
97217 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97218 lock_limit >>= PAGE_SHIFT;
97219+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97220 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97221 return -EAGAIN;
97222 }
97223@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97224 if (error)
97225 return error;
97226
97227+ if (!gr_acl_handle_mmap(file, prot))
97228+ return -EACCES;
97229+
97230 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
97231 }
97232 EXPORT_SYMBOL(do_mmap_pgoff);
97233@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
97234 */
97235 int vma_wants_writenotify(struct vm_area_struct *vma)
97236 {
97237- unsigned int vm_flags = vma->vm_flags;
97238+ unsigned long vm_flags = vma->vm_flags;
97239
97240 /* If it was private or non-writable, the write bit is already clear */
97241- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97242+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97243 return 0;
97244
97245 /* The backer wishes to know when pages are first written to? */
97246@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97247 unsigned long charged = 0;
97248 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
97249
97250+#ifdef CONFIG_PAX_SEGMEXEC
97251+ struct vm_area_struct *vma_m = NULL;
97252+#endif
97253+
97254+ /*
97255+ * mm->mmap_sem is required to protect against another thread
97256+ * changing the mappings in case we sleep.
97257+ */
97258+ verify_mm_writelocked(mm);
97259+
97260 /* Clear old maps */
97261 error = -ENOMEM;
97262-munmap_back:
97263 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97264 if (vma && vma->vm_start < addr + len) {
97265 if (do_munmap(mm, addr, len))
97266 return -ENOMEM;
97267- goto munmap_back;
97268+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97269+ BUG_ON(vma && vma->vm_start < addr + len);
97270 }
97271
97272 /* Check against address space limit. */
97273@@ -1173,6 +1294,16 @@ munmap_back:
97274 goto unacct_error;
97275 }
97276
97277+#ifdef CONFIG_PAX_SEGMEXEC
97278+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97279+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97280+ if (!vma_m) {
97281+ error = -ENOMEM;
97282+ goto free_vma;
97283+ }
97284+ }
97285+#endif
97286+
97287 vma->vm_mm = mm;
97288 vma->vm_start = addr;
97289 vma->vm_end = addr + len;
97290@@ -1180,8 +1311,9 @@ munmap_back:
97291 vma->vm_page_prot = vm_get_page_prot(vm_flags);
97292 vma->vm_pgoff = pgoff;
97293
97294+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
97295+
97296 if (file) {
97297- error = -EINVAL;
97298 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
97299 goto free_vma;
97300 if (vm_flags & VM_DENYWRITE) {
97301@@ -1195,6 +1327,19 @@ munmap_back:
97302 error = file->f_op->mmap(file, vma);
97303 if (error)
97304 goto unmap_and_free_vma;
97305+
97306+#ifdef CONFIG_PAX_SEGMEXEC
97307+ if (vma_m && (vm_flags & VM_EXECUTABLE))
97308+ added_exe_file_vma(mm);
97309+#endif
97310+
97311+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97312+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97313+ vma->vm_flags |= VM_PAGEEXEC;
97314+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97315+ }
97316+#endif
97317+
97318 if (vm_flags & VM_EXECUTABLE)
97319 added_exe_file_vma(mm);
97320
97321@@ -1207,6 +1352,8 @@ munmap_back:
97322 pgoff = vma->vm_pgoff;
97323 vm_flags = vma->vm_flags;
97324 } else if (vm_flags & VM_SHARED) {
97325+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
97326+ goto free_vma;
97327 error = shmem_zero_setup(vma);
97328 if (error)
97329 goto free_vma;
97330@@ -1218,6 +1365,11 @@ munmap_back:
97331 vma_link(mm, vma, prev, rb_link, rb_parent);
97332 file = vma->vm_file;
97333
97334+#ifdef CONFIG_PAX_SEGMEXEC
97335+ if (vma_m)
97336+ pax_mirror_vma(vma_m, vma);
97337+#endif
97338+
97339 /* Once vma denies write, undo our temporary denial count */
97340 if (correct_wcount)
97341 atomic_inc(&inode->i_writecount);
97342@@ -1226,6 +1378,7 @@ out:
97343
97344 mm->total_vm += len >> PAGE_SHIFT;
97345 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97346+ track_exec_limit(mm, addr, addr + len, vm_flags);
97347 if (vm_flags & VM_LOCKED) {
97348 /*
97349 * makes pages present; downgrades, drops, reacquires mmap_sem
97350@@ -1248,6 +1401,12 @@ unmap_and_free_vma:
97351 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
97352 charged = 0;
97353 free_vma:
97354+
97355+#ifdef CONFIG_PAX_SEGMEXEC
97356+ if (vma_m)
97357+ kmem_cache_free(vm_area_cachep, vma_m);
97358+#endif
97359+
97360 kmem_cache_free(vm_area_cachep, vma);
97361 unacct_error:
97362 if (charged)
97363@@ -1255,6 +1414,44 @@ unacct_error:
97364 return error;
97365 }
97366
97367+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
97368+{
97369+ if (!vma) {
97370+#ifdef CONFIG_STACK_GROWSUP
97371+ if (addr > sysctl_heap_stack_gap)
97372+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97373+ else
97374+ vma = find_vma(current->mm, 0);
97375+ if (vma && (vma->vm_flags & VM_GROWSUP))
97376+ return false;
97377+#endif
97378+ return true;
97379+ }
97380+
97381+ if (addr + len > vma->vm_start)
97382+ return false;
97383+
97384+ if (vma->vm_flags & VM_GROWSDOWN)
97385+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97386+#ifdef CONFIG_STACK_GROWSUP
97387+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97388+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
97389+#endif
97390+
97391+ return true;
97392+}
97393+
97394+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
97395+{
97396+ if (vma->vm_start < len)
97397+ return -ENOMEM;
97398+ if (!(vma->vm_flags & VM_GROWSDOWN))
97399+ return vma->vm_start - len;
97400+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97401+ return vma->vm_start - len - sysctl_heap_stack_gap;
97402+ return -ENOMEM;
97403+}
97404+
97405 /* Get an address range which is currently unmapped.
97406 * For shmat() with addr=0.
97407 *
97408@@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97409 if (flags & MAP_FIXED)
97410 return addr;
97411
97412+#ifdef CONFIG_PAX_RANDMMAP
97413+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97414+#endif
97415+
97416 if (addr) {
97417 addr = PAGE_ALIGN(addr);
97418- vma = find_vma(mm, addr);
97419- if (TASK_SIZE - len >= addr &&
97420- (!vma || addr + len <= vma->vm_start))
97421- return addr;
97422+ if (TASK_SIZE - len >= addr) {
97423+ vma = find_vma(mm, addr);
97424+ if (check_heap_stack_gap(vma, addr, len))
97425+ return addr;
97426+ }
97427 }
97428 if (len > mm->cached_hole_size) {
97429- start_addr = addr = mm->free_area_cache;
97430+ start_addr = addr = mm->free_area_cache;
97431 } else {
97432- start_addr = addr = TASK_UNMAPPED_BASE;
97433- mm->cached_hole_size = 0;
97434+ start_addr = addr = mm->mmap_base;
97435+ mm->cached_hole_size = 0;
97436 }
97437
97438 full_search:
97439@@ -1303,34 +1505,40 @@ full_search:
97440 * Start a new search - just in case we missed
97441 * some holes.
97442 */
97443- if (start_addr != TASK_UNMAPPED_BASE) {
97444- addr = TASK_UNMAPPED_BASE;
97445- start_addr = addr;
97446+ if (start_addr != mm->mmap_base) {
97447+ start_addr = addr = mm->mmap_base;
97448 mm->cached_hole_size = 0;
97449 goto full_search;
97450 }
97451 return -ENOMEM;
97452 }
97453- if (!vma || addr + len <= vma->vm_start) {
97454- /*
97455- * Remember the place where we stopped the search:
97456- */
97457- mm->free_area_cache = addr + len;
97458- return addr;
97459- }
97460+ if (check_heap_stack_gap(vma, addr, len))
97461+ break;
97462 if (addr + mm->cached_hole_size < vma->vm_start)
97463 mm->cached_hole_size = vma->vm_start - addr;
97464 addr = vma->vm_end;
97465 }
97466+
97467+ /*
97468+ * Remember the place where we stopped the search:
97469+ */
97470+ mm->free_area_cache = addr + len;
97471+ return addr;
97472 }
97473 #endif
97474
97475 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
97476 {
97477+
97478+#ifdef CONFIG_PAX_SEGMEXEC
97479+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97480+ return;
97481+#endif
97482+
97483 /*
97484 * Is this a new hole at the lowest possible address?
97485 */
97486- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
97487+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
97488 mm->free_area_cache = addr;
97489 mm->cached_hole_size = ~0UL;
97490 }
97491@@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97492 {
97493 struct vm_area_struct *vma;
97494 struct mm_struct *mm = current->mm;
97495- unsigned long addr = addr0;
97496+ unsigned long base = mm->mmap_base, addr = addr0;
97497
97498 /* requested length too big for entire address space */
97499 if (len > TASK_SIZE)
97500@@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97501 if (flags & MAP_FIXED)
97502 return addr;
97503
97504+#ifdef CONFIG_PAX_RANDMMAP
97505+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97506+#endif
97507+
97508 /* requesting a specific address */
97509 if (addr) {
97510 addr = PAGE_ALIGN(addr);
97511- vma = find_vma(mm, addr);
97512- if (TASK_SIZE - len >= addr &&
97513- (!vma || addr + len <= vma->vm_start))
97514- return addr;
97515+ if (TASK_SIZE - len >= addr) {
97516+ vma = find_vma(mm, addr);
97517+ if (check_heap_stack_gap(vma, addr, len))
97518+ return addr;
97519+ }
97520 }
97521
97522 /* check if free_area_cache is useful for us */
97523@@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97524 /* make sure it can fit in the remaining address space */
97525 if (addr > len) {
97526 vma = find_vma(mm, addr-len);
97527- if (!vma || addr <= vma->vm_start)
97528+ if (check_heap_stack_gap(vma, addr - len, len))
97529 /* remember the address as a hint for next time */
97530 return (mm->free_area_cache = addr-len);
97531 }
97532@@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97533 * return with success:
97534 */
97535 vma = find_vma(mm, addr);
97536- if (!vma || addr+len <= vma->vm_start)
97537+ if (check_heap_stack_gap(vma, addr, len))
97538 /* remember the address as a hint for next time */
97539 return (mm->free_area_cache = addr);
97540
97541@@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97542 mm->cached_hole_size = vma->vm_start - addr;
97543
97544 /* try just below the current vma->vm_start */
97545- addr = vma->vm_start-len;
97546- } while (len < vma->vm_start);
97547+ addr = skip_heap_stack_gap(vma, len);
97548+ } while (!IS_ERR_VALUE(addr));
97549
97550 bottomup:
97551 /*
97552@@ -1414,13 +1627,21 @@ bottomup:
97553 * can happen with large stack limits and large mmap()
97554 * allocations.
97555 */
97556+ mm->mmap_base = TASK_UNMAPPED_BASE;
97557+
97558+#ifdef CONFIG_PAX_RANDMMAP
97559+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97560+ mm->mmap_base += mm->delta_mmap;
97561+#endif
97562+
97563+ mm->free_area_cache = mm->mmap_base;
97564 mm->cached_hole_size = ~0UL;
97565- mm->free_area_cache = TASK_UNMAPPED_BASE;
97566 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
97567 /*
97568 * Restore the topdown base:
97569 */
97570- mm->free_area_cache = mm->mmap_base;
97571+ mm->mmap_base = base;
97572+ mm->free_area_cache = base;
97573 mm->cached_hole_size = ~0UL;
97574
97575 return addr;
97576@@ -1429,6 +1650,12 @@ bottomup:
97577
97578 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97579 {
97580+
97581+#ifdef CONFIG_PAX_SEGMEXEC
97582+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97583+ return;
97584+#endif
97585+
97586 /*
97587 * Is this a new hole at the highest possible address?
97588 */
97589@@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97590 mm->free_area_cache = addr;
97591
97592 /* dont allow allocations above current base */
97593- if (mm->free_area_cache > mm->mmap_base)
97594+ if (mm->free_area_cache > mm->mmap_base) {
97595 mm->free_area_cache = mm->mmap_base;
97596+ mm->cached_hole_size = ~0UL;
97597+ }
97598 }
97599
97600 unsigned long
97601@@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
97602
97603 EXPORT_SYMBOL(find_vma);
97604
97605-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
97606+/*
97607+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
97608+ */
97609 struct vm_area_struct *
97610 find_vma_prev(struct mm_struct *mm, unsigned long addr,
97611 struct vm_area_struct **pprev)
97612 {
97613- struct vm_area_struct *vma = NULL, *prev = NULL;
97614- struct rb_node *rb_node;
97615- if (!mm)
97616- goto out;
97617-
97618- /* Guard against addr being lower than the first VMA */
97619- vma = mm->mmap;
97620-
97621- /* Go through the RB tree quickly. */
97622- rb_node = mm->mm_rb.rb_node;
97623-
97624- while (rb_node) {
97625- struct vm_area_struct *vma_tmp;
97626- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97627-
97628- if (addr < vma_tmp->vm_end) {
97629- rb_node = rb_node->rb_left;
97630- } else {
97631- prev = vma_tmp;
97632- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
97633- break;
97634+ struct vm_area_struct *vma;
97635+
97636+ vma = find_vma(mm, addr);
97637+ if (vma) {
97638+ *pprev = vma->vm_prev;
97639+ } else {
97640+ struct rb_node *rb_node = mm->mm_rb.rb_node;
97641+ *pprev = NULL;
97642+ while (rb_node) {
97643+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97644 rb_node = rb_node->rb_right;
97645 }
97646 }
97647+ return vma;
97648+}
97649+
97650+#ifdef CONFIG_PAX_SEGMEXEC
97651+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97652+{
97653+ struct vm_area_struct *vma_m;
97654
97655-out:
97656- *pprev = prev;
97657- return prev ? prev->vm_next : vma;
97658+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97659+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97660+ BUG_ON(vma->vm_mirror);
97661+ return NULL;
97662+ }
97663+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97664+ vma_m = vma->vm_mirror;
97665+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97666+ BUG_ON(vma->vm_file != vma_m->vm_file);
97667+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97668+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
97669+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
97670+ return vma_m;
97671 }
97672+#endif
97673
97674 /*
97675 * Verify that the stack growth is acceptable and
97676@@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97677 return -ENOMEM;
97678
97679 /* Stack limit test */
97680+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
97681 if (size > rlim[RLIMIT_STACK].rlim_cur)
97682 return -ENOMEM;
97683
97684@@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97685 unsigned long limit;
97686 locked = mm->locked_vm + grow;
97687 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
97688+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97689 if (locked > limit && !capable(CAP_IPC_LOCK))
97690 return -ENOMEM;
97691 }
97692@@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97693 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97694 * vma is the last one with address > vma->vm_end. Have to extend vma.
97695 */
97696+#ifndef CONFIG_IA64
97697+static
97698+#endif
97699 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97700 {
97701 int error;
97702+ bool locknext;
97703
97704 if (!(vma->vm_flags & VM_GROWSUP))
97705 return -EFAULT;
97706
97707+ /* Also guard against wrapping around to address 0. */
97708+ if (address < PAGE_ALIGN(address+1))
97709+ address = PAGE_ALIGN(address+1);
97710+ else
97711+ return -ENOMEM;
97712+
97713 /*
97714 * We must make sure the anon_vma is allocated
97715 * so that the anon_vma locking is not a noop.
97716 */
97717 if (unlikely(anon_vma_prepare(vma)))
97718 return -ENOMEM;
97719+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97720+ if (locknext && anon_vma_prepare(vma->vm_next))
97721+ return -ENOMEM;
97722 anon_vma_lock(vma);
97723+ if (locknext)
97724+ anon_vma_lock(vma->vm_next);
97725
97726 /*
97727 * vma->vm_start/vm_end cannot change under us because the caller
97728 * is required to hold the mmap_sem in read mode. We need the
97729- * anon_vma lock to serialize against concurrent expand_stacks.
97730- * Also guard against wrapping around to address 0.
97731+ * anon_vma locks to serialize against concurrent expand_stacks
97732+ * and expand_upwards.
97733 */
97734- if (address < PAGE_ALIGN(address+4))
97735- address = PAGE_ALIGN(address+4);
97736- else {
97737- anon_vma_unlock(vma);
97738- return -ENOMEM;
97739- }
97740 error = 0;
97741
97742 /* Somebody else might have raced and expanded it already */
97743- if (address > vma->vm_end) {
97744+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97745+ error = -ENOMEM;
97746+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97747 unsigned long size, grow;
97748
97749 size = address - vma->vm_start;
97750@@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97751 vma->vm_end = address;
97752 }
97753 }
97754+ if (locknext)
97755+ anon_vma_unlock(vma->vm_next);
97756 anon_vma_unlock(vma);
97757 return error;
97758 }
97759@@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
97760 unsigned long address)
97761 {
97762 int error;
97763+ bool lockprev = false;
97764+ struct vm_area_struct *prev;
97765
97766 /*
97767 * We must make sure the anon_vma is allocated
97768@@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
97769 if (error)
97770 return error;
97771
97772+ prev = vma->vm_prev;
97773+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97774+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97775+#endif
97776+ if (lockprev && anon_vma_prepare(prev))
97777+ return -ENOMEM;
97778+ if (lockprev)
97779+ anon_vma_lock(prev);
97780+
97781 anon_vma_lock(vma);
97782
97783 /*
97784@@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
97785 */
97786
97787 /* Somebody else might have raced and expanded it already */
97788- if (address < vma->vm_start) {
97789+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97790+ error = -ENOMEM;
97791+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97792 unsigned long size, grow;
97793
97794+#ifdef CONFIG_PAX_SEGMEXEC
97795+ struct vm_area_struct *vma_m;
97796+
97797+ vma_m = pax_find_mirror_vma(vma);
97798+#endif
97799+
97800 size = vma->vm_end - address;
97801 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97802
97803@@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
97804 if (!error) {
97805 vma->vm_start = address;
97806 vma->vm_pgoff -= grow;
97807+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97808+
97809+#ifdef CONFIG_PAX_SEGMEXEC
97810+ if (vma_m) {
97811+ vma_m->vm_start -= grow << PAGE_SHIFT;
97812+ vma_m->vm_pgoff -= grow;
97813+ }
97814+#endif
97815+
97816+
97817 }
97818 }
97819 }
97820 anon_vma_unlock(vma);
97821+ if (lockprev)
97822+ anon_vma_unlock(prev);
97823 return error;
97824 }
97825
97826@@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97827 do {
97828 long nrpages = vma_pages(vma);
97829
97830+#ifdef CONFIG_PAX_SEGMEXEC
97831+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97832+ vma = remove_vma(vma);
97833+ continue;
97834+ }
97835+#endif
97836+
97837 mm->total_vm -= nrpages;
97838 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97839 vma = remove_vma(vma);
97840@@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97841 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97842 vma->vm_prev = NULL;
97843 do {
97844+
97845+#ifdef CONFIG_PAX_SEGMEXEC
97846+ if (vma->vm_mirror) {
97847+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97848+ vma->vm_mirror->vm_mirror = NULL;
97849+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97850+ vma->vm_mirror = NULL;
97851+ }
97852+#endif
97853+
97854 rb_erase(&vma->vm_rb, &mm->mm_rb);
97855 mm->map_count--;
97856 tail_vma = vma;
97857@@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97858 struct mempolicy *pol;
97859 struct vm_area_struct *new;
97860
97861+#ifdef CONFIG_PAX_SEGMEXEC
97862+ struct vm_area_struct *vma_m, *new_m = NULL;
97863+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97864+#endif
97865+
97866 if (is_vm_hugetlb_page(vma) && (addr &
97867 ~(huge_page_mask(hstate_vma(vma)))))
97868 return -EINVAL;
97869
97870+#ifdef CONFIG_PAX_SEGMEXEC
97871+ vma_m = pax_find_mirror_vma(vma);
97872+
97873+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97874+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97875+ if (mm->map_count >= sysctl_max_map_count-1)
97876+ return -ENOMEM;
97877+ } else
97878+#endif
97879+
97880 if (mm->map_count >= sysctl_max_map_count)
97881 return -ENOMEM;
97882
97883@@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97884 if (!new)
97885 return -ENOMEM;
97886
97887+#ifdef CONFIG_PAX_SEGMEXEC
97888+ if (vma_m) {
97889+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97890+ if (!new_m) {
97891+ kmem_cache_free(vm_area_cachep, new);
97892+ return -ENOMEM;
97893+ }
97894+ }
97895+#endif
97896+
97897 /* most fields are the same, copy all, and then fixup */
97898 *new = *vma;
97899
97900@@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97901 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97902 }
97903
97904+#ifdef CONFIG_PAX_SEGMEXEC
97905+ if (vma_m) {
97906+ *new_m = *vma_m;
97907+ new_m->vm_mirror = new;
97908+ new->vm_mirror = new_m;
97909+
97910+ if (new_below)
97911+ new_m->vm_end = addr_m;
97912+ else {
97913+ new_m->vm_start = addr_m;
97914+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97915+ }
97916+ }
97917+#endif
97918+
97919 pol = mpol_dup(vma_policy(vma));
97920 if (IS_ERR(pol)) {
97921+
97922+#ifdef CONFIG_PAX_SEGMEXEC
97923+ if (new_m)
97924+ kmem_cache_free(vm_area_cachep, new_m);
97925+#endif
97926+
97927 kmem_cache_free(vm_area_cachep, new);
97928 return PTR_ERR(pol);
97929 }
97930@@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97931 else
97932 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97933
97934+#ifdef CONFIG_PAX_SEGMEXEC
97935+ if (vma_m) {
97936+ mpol_get(pol);
97937+ vma_set_policy(new_m, pol);
97938+
97939+ if (new_m->vm_file) {
97940+ get_file(new_m->vm_file);
97941+ if (vma_m->vm_flags & VM_EXECUTABLE)
97942+ added_exe_file_vma(mm);
97943+ }
97944+
97945+ if (new_m->vm_ops && new_m->vm_ops->open)
97946+ new_m->vm_ops->open(new_m);
97947+
97948+ if (new_below)
97949+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97950+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97951+ else
97952+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97953+ }
97954+#endif
97955+
97956 return 0;
97957 }
97958
97959@@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97960 * work. This now handles partial unmappings.
97961 * Jeremy Fitzhardinge <jeremy@goop.org>
97962 */
97963+#ifdef CONFIG_PAX_SEGMEXEC
97964 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97965 {
97966+ int ret = __do_munmap(mm, start, len);
97967+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97968+ return ret;
97969+
97970+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97971+}
97972+
97973+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97974+#else
97975+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97976+#endif
97977+{
97978 unsigned long end;
97979 struct vm_area_struct *vma, *prev, *last;
97980
97981+ /*
97982+ * mm->mmap_sem is required to protect against another thread
97983+ * changing the mappings in case we sleep.
97984+ */
97985+ verify_mm_writelocked(mm);
97986+
97987 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97988 return -EINVAL;
97989
97990@@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97991 /* Fix up all other VM information */
97992 remove_vma_list(mm, vma);
97993
97994+ track_exec_limit(mm, start, end, 0UL);
97995+
97996 return 0;
97997 }
97998
97999@@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
98000
98001 profile_munmap(addr);
98002
98003+#ifdef CONFIG_PAX_SEGMEXEC
98004+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
98005+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
98006+ return -EINVAL;
98007+#endif
98008+
98009 down_write(&mm->mmap_sem);
98010 ret = do_munmap(mm, addr, len);
98011 up_write(&mm->mmap_sem);
98012 return ret;
98013 }
98014
98015-static inline void verify_mm_writelocked(struct mm_struct *mm)
98016-{
98017-#ifdef CONFIG_DEBUG_VM
98018- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98019- WARN_ON(1);
98020- up_read(&mm->mmap_sem);
98021- }
98022-#endif
98023-}
98024-
98025 /*
98026 * this is really a simplified "do_mmap". it only handles
98027 * anonymous maps. eventually we may be able to do some
98028@@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
98029 struct rb_node ** rb_link, * rb_parent;
98030 pgoff_t pgoff = addr >> PAGE_SHIFT;
98031 int error;
98032+ unsigned long charged;
98033
98034 len = PAGE_ALIGN(len);
98035 if (!len)
98036@@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
98037
98038 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98039
98040+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98041+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98042+ flags &= ~VM_EXEC;
98043+
98044+#ifdef CONFIG_PAX_MPROTECT
98045+ if (mm->pax_flags & MF_PAX_MPROTECT)
98046+ flags &= ~VM_MAYEXEC;
98047+#endif
98048+
98049+ }
98050+#endif
98051+
98052 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98053 if (error & ~PAGE_MASK)
98054 return error;
98055
98056+ charged = len >> PAGE_SHIFT;
98057+
98058 /*
98059 * mlock MCL_FUTURE?
98060 */
98061 if (mm->def_flags & VM_LOCKED) {
98062 unsigned long locked, lock_limit;
98063- locked = len >> PAGE_SHIFT;
98064+ locked = charged;
98065 locked += mm->locked_vm;
98066 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
98067 lock_limit >>= PAGE_SHIFT;
98068@@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
98069 /*
98070 * Clear old maps. this also does some error checking for us
98071 */
98072- munmap_back:
98073 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
98074 if (vma && vma->vm_start < addr + len) {
98075 if (do_munmap(mm, addr, len))
98076 return -ENOMEM;
98077- goto munmap_back;
98078+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
98079+ BUG_ON(vma && vma->vm_start < addr + len);
98080 }
98081
98082 /* Check against address space limits *after* clearing old maps... */
98083- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98084+ if (!may_expand_vm(mm, charged))
98085 return -ENOMEM;
98086
98087 if (mm->map_count > sysctl_max_map_count)
98088 return -ENOMEM;
98089
98090- if (security_vm_enough_memory(len >> PAGE_SHIFT))
98091+ if (security_vm_enough_memory(charged))
98092 return -ENOMEM;
98093
98094 /* Can we just expand an old private anonymous mapping? */
98095@@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
98096 */
98097 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98098 if (!vma) {
98099- vm_unacct_memory(len >> PAGE_SHIFT);
98100+ vm_unacct_memory(charged);
98101 return -ENOMEM;
98102 }
98103
98104@@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
98105 vma->vm_page_prot = vm_get_page_prot(flags);
98106 vma_link(mm, vma, prev, rb_link, rb_parent);
98107 out:
98108- mm->total_vm += len >> PAGE_SHIFT;
98109+ mm->total_vm += charged;
98110 if (flags & VM_LOCKED) {
98111 if (!mlock_vma_pages_range(vma, addr, addr + len))
98112- mm->locked_vm += (len >> PAGE_SHIFT);
98113+ mm->locked_vm += charged;
98114 }
98115+ track_exec_limit(mm, addr, addr + len, flags);
98116 return addr;
98117 }
98118
98119@@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
98120 * Walk the list again, actually closing and freeing it,
98121 * with preemption enabled, without holding any MM locks.
98122 */
98123- while (vma)
98124+ while (vma) {
98125+ vma->vm_mirror = NULL;
98126 vma = remove_vma(vma);
98127+ }
98128
98129 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
98130 }
98131@@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
98132 struct vm_area_struct * __vma, * prev;
98133 struct rb_node ** rb_link, * rb_parent;
98134
98135+#ifdef CONFIG_PAX_SEGMEXEC
98136+ struct vm_area_struct *vma_m = NULL;
98137+#endif
98138+
98139 /*
98140 * The vm_pgoff of a purely anonymous vma should be irrelevant
98141 * until its first write fault, when page's anon_vma and index
98142@@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
98143 if ((vma->vm_flags & VM_ACCOUNT) &&
98144 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98145 return -ENOMEM;
98146+
98147+#ifdef CONFIG_PAX_SEGMEXEC
98148+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98149+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98150+ if (!vma_m)
98151+ return -ENOMEM;
98152+ }
98153+#endif
98154+
98155 vma_link(mm, vma, prev, rb_link, rb_parent);
98156+
98157+#ifdef CONFIG_PAX_SEGMEXEC
98158+ if (vma_m)
98159+ pax_mirror_vma(vma_m, vma);
98160+#endif
98161+
98162 return 0;
98163 }
98164
98165@@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98166 struct rb_node **rb_link, *rb_parent;
98167 struct mempolicy *pol;
98168
98169+ BUG_ON(vma->vm_mirror);
98170+
98171 /*
98172 * If anonymous vma has not yet been faulted, update new pgoff
98173 * to match new location, to increase its chance of merging.
98174@@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98175 return new_vma;
98176 }
98177
98178+#ifdef CONFIG_PAX_SEGMEXEC
98179+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98180+{
98181+ struct vm_area_struct *prev_m;
98182+ struct rb_node **rb_link_m, *rb_parent_m;
98183+ struct mempolicy *pol_m;
98184+
98185+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98186+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98187+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98188+ *vma_m = *vma;
98189+ pol_m = vma_policy(vma_m);
98190+ mpol_get(pol_m);
98191+ vma_set_policy(vma_m, pol_m);
98192+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98193+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98194+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98195+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98196+ if (vma_m->vm_file)
98197+ get_file(vma_m->vm_file);
98198+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98199+ vma_m->vm_ops->open(vma_m);
98200+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
98201+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98202+ vma_m->vm_mirror = vma;
98203+ vma->vm_mirror = vma_m;
98204+}
98205+#endif
98206+
98207 /*
98208 * Return true if the calling process may expand its vm space by the passed
98209 * number of pages
98210@@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98211 unsigned long lim;
98212
98213 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
98214-
98215+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98216 if (cur + npages > lim)
98217 return 0;
98218 return 1;
98219@@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
98220 vma->vm_start = addr;
98221 vma->vm_end = addr + len;
98222
98223+#ifdef CONFIG_PAX_MPROTECT
98224+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98225+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98226+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98227+ return -EPERM;
98228+ if (!(vm_flags & VM_EXEC))
98229+ vm_flags &= ~VM_MAYEXEC;
98230+#else
98231+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98232+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98233+#endif
98234+ else
98235+ vm_flags &= ~VM_MAYWRITE;
98236+ }
98237+#endif
98238+
98239 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
98240 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98241
98242diff --git a/mm/mprotect.c b/mm/mprotect.c
98243index 1737c7e..c7faeb4 100644
98244--- a/mm/mprotect.c
98245+++ b/mm/mprotect.c
98246@@ -24,10 +24,16 @@
98247 #include <linux/mmu_notifier.h>
98248 #include <linux/migrate.h>
98249 #include <linux/perf_event.h>
98250+
98251+#ifdef CONFIG_PAX_MPROTECT
98252+#include <linux/elf.h>
98253+#endif
98254+
98255 #include <asm/uaccess.h>
98256 #include <asm/pgtable.h>
98257 #include <asm/cacheflush.h>
98258 #include <asm/tlbflush.h>
98259+#include <asm/mmu_context.h>
98260
98261 #ifndef pgprot_modify
98262 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98263@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
98264 flush_tlb_range(vma, start, end);
98265 }
98266
98267+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98268+/* called while holding the mmap semaphor for writing except stack expansion */
98269+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98270+{
98271+ unsigned long oldlimit, newlimit = 0UL;
98272+
98273+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
98274+ return;
98275+
98276+ spin_lock(&mm->page_table_lock);
98277+ oldlimit = mm->context.user_cs_limit;
98278+ if ((prot & VM_EXEC) && oldlimit < end)
98279+ /* USER_CS limit moved up */
98280+ newlimit = end;
98281+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98282+ /* USER_CS limit moved down */
98283+ newlimit = start;
98284+
98285+ if (newlimit) {
98286+ mm->context.user_cs_limit = newlimit;
98287+
98288+#ifdef CONFIG_SMP
98289+ wmb();
98290+ cpus_clear(mm->context.cpu_user_cs_mask);
98291+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98292+#endif
98293+
98294+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98295+ }
98296+ spin_unlock(&mm->page_table_lock);
98297+ if (newlimit == end) {
98298+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98299+
98300+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98301+ if (is_vm_hugetlb_page(vma))
98302+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98303+ else
98304+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
98305+ }
98306+}
98307+#endif
98308+
98309 int
98310 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98311 unsigned long start, unsigned long end, unsigned long newflags)
98312@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98313 int error;
98314 int dirty_accountable = 0;
98315
98316+#ifdef CONFIG_PAX_SEGMEXEC
98317+ struct vm_area_struct *vma_m = NULL;
98318+ unsigned long start_m, end_m;
98319+
98320+ start_m = start + SEGMEXEC_TASK_SIZE;
98321+ end_m = end + SEGMEXEC_TASK_SIZE;
98322+#endif
98323+
98324 if (newflags == oldflags) {
98325 *pprev = vma;
98326 return 0;
98327 }
98328
98329+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98330+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98331+
98332+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98333+ return -ENOMEM;
98334+
98335+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98336+ return -ENOMEM;
98337+ }
98338+
98339 /*
98340 * If we make a private mapping writable we increase our commit;
98341 * but (without finer accounting) cannot reduce our commit if we
98342@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98343 }
98344 }
98345
98346+#ifdef CONFIG_PAX_SEGMEXEC
98347+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98348+ if (start != vma->vm_start) {
98349+ error = split_vma(mm, vma, start, 1);
98350+ if (error)
98351+ goto fail;
98352+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98353+ *pprev = (*pprev)->vm_next;
98354+ }
98355+
98356+ if (end != vma->vm_end) {
98357+ error = split_vma(mm, vma, end, 0);
98358+ if (error)
98359+ goto fail;
98360+ }
98361+
98362+ if (pax_find_mirror_vma(vma)) {
98363+ error = __do_munmap(mm, start_m, end_m - start_m);
98364+ if (error)
98365+ goto fail;
98366+ } else {
98367+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98368+ if (!vma_m) {
98369+ error = -ENOMEM;
98370+ goto fail;
98371+ }
98372+ vma->vm_flags = newflags;
98373+ pax_mirror_vma(vma_m, vma);
98374+ }
98375+ }
98376+#endif
98377+
98378 /*
98379 * First try to merge with previous and/or next vma.
98380 */
98381@@ -195,9 +293,21 @@ success:
98382 * vm_flags and vm_page_prot are protected by the mmap_sem
98383 * held in write mode.
98384 */
98385+
98386+#ifdef CONFIG_PAX_SEGMEXEC
98387+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98388+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98389+#endif
98390+
98391 vma->vm_flags = newflags;
98392+
98393+#ifdef CONFIG_PAX_MPROTECT
98394+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98395+ mm->binfmt->handle_mprotect(vma, newflags);
98396+#endif
98397+
98398 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
98399- vm_get_page_prot(newflags));
98400+ vm_get_page_prot(vma->vm_flags));
98401
98402 if (vma_wants_writenotify(vma)) {
98403 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
98404@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98405 end = start + len;
98406 if (end <= start)
98407 return -ENOMEM;
98408+
98409+#ifdef CONFIG_PAX_SEGMEXEC
98410+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98411+ if (end > SEGMEXEC_TASK_SIZE)
98412+ return -EINVAL;
98413+ } else
98414+#endif
98415+
98416+ if (end > TASK_SIZE)
98417+ return -EINVAL;
98418+
98419 if (!arch_validate_prot(prot))
98420 return -EINVAL;
98421
98422@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98423 /*
98424 * Does the application expect PROT_READ to imply PROT_EXEC:
98425 */
98426- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98427+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98428 prot |= PROT_EXEC;
98429
98430 vm_flags = calc_vm_prot_bits(prot);
98431@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98432 if (start > vma->vm_start)
98433 prev = vma;
98434
98435+#ifdef CONFIG_PAX_MPROTECT
98436+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98437+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98438+#endif
98439+
98440 for (nstart = start ; ; ) {
98441 unsigned long newflags;
98442
98443@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98444
98445 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98446 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98447+ if (prot & (PROT_WRITE | PROT_EXEC))
98448+ gr_log_rwxmprotect(vma->vm_file);
98449+
98450+ error = -EACCES;
98451+ goto out;
98452+ }
98453+
98454+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98455 error = -EACCES;
98456 goto out;
98457 }
98458@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98459 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98460 if (error)
98461 goto out;
98462+
98463+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98464+
98465 nstart = tmp;
98466
98467 if (nstart < prev->vm_end)
98468diff --git a/mm/mremap.c b/mm/mremap.c
98469index 3e98d79..1706cec 100644
98470--- a/mm/mremap.c
98471+++ b/mm/mremap.c
98472@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98473 continue;
98474 pte = ptep_clear_flush(vma, old_addr, old_pte);
98475 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98476+
98477+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98478+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98479+ pte = pte_exprotect(pte);
98480+#endif
98481+
98482 set_pte_at(mm, new_addr, new_pte, pte);
98483 }
98484
98485@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98486 if (is_vm_hugetlb_page(vma))
98487 goto Einval;
98488
98489+#ifdef CONFIG_PAX_SEGMEXEC
98490+ if (pax_find_mirror_vma(vma))
98491+ goto Einval;
98492+#endif
98493+
98494 /* We can't remap across vm area boundaries */
98495 if (old_len > vma->vm_end - addr)
98496 goto Efault;
98497@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
98498 unsigned long ret = -EINVAL;
98499 unsigned long charged = 0;
98500 unsigned long map_flags;
98501+ unsigned long pax_task_size = TASK_SIZE;
98502
98503 if (new_addr & ~PAGE_MASK)
98504 goto out;
98505
98506- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98507+#ifdef CONFIG_PAX_SEGMEXEC
98508+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98509+ pax_task_size = SEGMEXEC_TASK_SIZE;
98510+#endif
98511+
98512+ pax_task_size -= PAGE_SIZE;
98513+
98514+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98515 goto out;
98516
98517 /* Check if the location we're moving into overlaps the
98518 * old location at all, and fail if it does.
98519 */
98520- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98521- goto out;
98522-
98523- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98524+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98525 goto out;
98526
98527 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98528@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
98529 struct vm_area_struct *vma;
98530 unsigned long ret = -EINVAL;
98531 unsigned long charged = 0;
98532+ unsigned long pax_task_size = TASK_SIZE;
98533
98534 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98535 goto out;
98536@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
98537 if (!new_len)
98538 goto out;
98539
98540+#ifdef CONFIG_PAX_SEGMEXEC
98541+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98542+ pax_task_size = SEGMEXEC_TASK_SIZE;
98543+#endif
98544+
98545+ pax_task_size -= PAGE_SIZE;
98546+
98547+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98548+ old_len > pax_task_size || addr > pax_task_size-old_len)
98549+ goto out;
98550+
98551 if (flags & MREMAP_FIXED) {
98552 if (flags & MREMAP_MAYMOVE)
98553 ret = mremap_to(addr, old_len, new_addr, new_len);
98554@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
98555 addr + new_len);
98556 }
98557 ret = addr;
98558+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98559 goto out;
98560 }
98561 }
98562@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
98563 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98564 if (ret)
98565 goto out;
98566+
98567+ map_flags = vma->vm_flags;
98568 ret = move_vma(vma, addr, old_len, new_len, new_addr);
98569+ if (!(ret & ~PAGE_MASK)) {
98570+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98571+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98572+ }
98573 }
98574 out:
98575 if (ret & ~PAGE_MASK)
98576diff --git a/mm/nommu.c b/mm/nommu.c
98577index 406e8d4..53970d3 100644
98578--- a/mm/nommu.c
98579+++ b/mm/nommu.c
98580@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
98581 int sysctl_overcommit_ratio = 50; /* default is 50% */
98582 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98583 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98584-int heap_stack_gap = 0;
98585
98586 atomic_long_t mmap_pages_allocated;
98587
98588@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98589 EXPORT_SYMBOL(find_vma);
98590
98591 /*
98592- * find a VMA
98593- * - we don't extend stack VMAs under NOMMU conditions
98594- */
98595-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98596-{
98597- return find_vma(mm, addr);
98598-}
98599-
98600-/*
98601 * expand a stack to a given address
98602 * - not supported under NOMMU conditions
98603 */
98604diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98605index 3ecab7e..594a471 100644
98606--- a/mm/page_alloc.c
98607+++ b/mm/page_alloc.c
98608@@ -289,7 +289,7 @@ out:
98609 * This usage means that zero-order pages may not be compound.
98610 */
98611
98612-static void free_compound_page(struct page *page)
98613+void free_compound_page(struct page *page)
98614 {
98615 __free_pages_ok(page, compound_order(page));
98616 }
98617@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98618 int bad = 0;
98619 int wasMlocked = __TestClearPageMlocked(page);
98620
98621+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98622+ unsigned long index = 1UL << order;
98623+#endif
98624+
98625 kmemcheck_free_shadow(page, order);
98626
98627 for (i = 0 ; i < (1 << order) ; ++i)
98628@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98629 debug_check_no_obj_freed(page_address(page),
98630 PAGE_SIZE << order);
98631 }
98632+
98633+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98634+ for (; index; --index)
98635+ sanitize_highpage(page + index - 1);
98636+#endif
98637+
98638 arch_free_page(page, order);
98639 kernel_map_pages(page, 1 << order, 0);
98640
98641@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
98642 arch_alloc_page(page, order);
98643 kernel_map_pages(page, 1 << order, 1);
98644
98645+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98646 if (gfp_flags & __GFP_ZERO)
98647 prep_zero_page(page, order, gfp_flags);
98648+#endif
98649
98650 if (order && (gfp_flags & __GFP_COMP))
98651 prep_compound_page(page, order);
98652@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
98653 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
98654 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
98655 }
98656+
98657+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98658+ sanitize_highpage(page);
98659+#endif
98660+
98661 arch_free_page(page, 0);
98662 kernel_map_pages(page, 1, 0);
98663
98664@@ -2179,6 +2196,8 @@ void show_free_areas(void)
98665 int cpu;
98666 struct zone *zone;
98667
98668+ pax_track_stack();
98669+
98670 for_each_populated_zone(zone) {
98671 show_node(zone);
98672 printk("%s per-cpu:\n", zone->name);
98673@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
98674 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
98675 }
98676 #else
98677-static void inline setup_usemap(struct pglist_data *pgdat,
98678+static inline void setup_usemap(struct pglist_data *pgdat,
98679 struct zone *zone, unsigned long zonesize) {}
98680 #endif /* CONFIG_SPARSEMEM */
98681
98682diff --git a/mm/percpu.c b/mm/percpu.c
98683index c90614a..5f7b7b8 100644
98684--- a/mm/percpu.c
98685+++ b/mm/percpu.c
98686@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98687 static unsigned int pcpu_high_unit_cpu __read_mostly;
98688
98689 /* the address of the first chunk which starts with the kernel static area */
98690-void *pcpu_base_addr __read_mostly;
98691+void *pcpu_base_addr __read_only;
98692 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98693
98694 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98695diff --git a/mm/rmap.c b/mm/rmap.c
98696index dd43373..d848cd7 100644
98697--- a/mm/rmap.c
98698+++ b/mm/rmap.c
98699@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98700 /* page_table_lock to protect against threads */
98701 spin_lock(&mm->page_table_lock);
98702 if (likely(!vma->anon_vma)) {
98703+
98704+#ifdef CONFIG_PAX_SEGMEXEC
98705+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98706+
98707+ if (vma_m) {
98708+ BUG_ON(vma_m->anon_vma);
98709+ vma_m->anon_vma = anon_vma;
98710+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
98711+ }
98712+#endif
98713+
98714 vma->anon_vma = anon_vma;
98715 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
98716 allocated = NULL;
98717diff --git a/mm/shmem.c b/mm/shmem.c
98718index 3e0005b..1d659a8 100644
98719--- a/mm/shmem.c
98720+++ b/mm/shmem.c
98721@@ -31,7 +31,7 @@
98722 #include <linux/swap.h>
98723 #include <linux/ima.h>
98724
98725-static struct vfsmount *shm_mnt;
98726+struct vfsmount *shm_mnt;
98727
98728 #ifdef CONFIG_SHMEM
98729 /*
98730@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
98731 goto unlock;
98732 }
98733 entry = shmem_swp_entry(info, index, NULL);
98734+ if (!entry)
98735+ goto unlock;
98736 if (entry->val) {
98737 /*
98738 * The more uptodate page coming down from a stacked
98739@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
98740 struct vm_area_struct pvma;
98741 struct page *page;
98742
98743+ pax_track_stack();
98744+
98745 spol = mpol_cond_copy(&mpol,
98746 mpol_shared_policy_lookup(&info->policy, idx));
98747
98748@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
98749
98750 info = SHMEM_I(inode);
98751 inode->i_size = len-1;
98752- if (len <= (char *)inode - (char *)info) {
98753+ if (len <= (char *)inode - (char *)info && len <= 64) {
98754 /* do it inline */
98755 memcpy(info, symname, len);
98756 inode->i_op = &shmem_symlink_inline_operations;
98757@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98758 int err = -ENOMEM;
98759
98760 /* Round up to L1_CACHE_BYTES to resist false sharing */
98761- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98762- L1_CACHE_BYTES), GFP_KERNEL);
98763+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98764 if (!sbinfo)
98765 return -ENOMEM;
98766
98767diff --git a/mm/slab.c b/mm/slab.c
98768index c8d466a..909e01e 100644
98769--- a/mm/slab.c
98770+++ b/mm/slab.c
98771@@ -174,7 +174,7 @@
98772
98773 /* Legal flag mask for kmem_cache_create(). */
98774 #if DEBUG
98775-# define CREATE_MASK (SLAB_RED_ZONE | \
98776+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
98777 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
98778 SLAB_CACHE_DMA | \
98779 SLAB_STORE_USER | \
98780@@ -182,7 +182,7 @@
98781 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98782 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
98783 #else
98784-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
98785+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
98786 SLAB_CACHE_DMA | \
98787 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
98788 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98789@@ -308,7 +308,7 @@ struct kmem_list3 {
98790 * Need this for bootstrapping a per node allocator.
98791 */
98792 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
98793-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
98794+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
98795 #define CACHE_CACHE 0
98796 #define SIZE_AC MAX_NUMNODES
98797 #define SIZE_L3 (2 * MAX_NUMNODES)
98798@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
98799 if ((x)->max_freeable < i) \
98800 (x)->max_freeable = i; \
98801 } while (0)
98802-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98803-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98804-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98805-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98806+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98807+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98808+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98809+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98810 #else
98811 #define STATS_INC_ACTIVE(x) do { } while (0)
98812 #define STATS_DEC_ACTIVE(x) do { } while (0)
98813@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
98814 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98815 */
98816 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98817- const struct slab *slab, void *obj)
98818+ const struct slab *slab, const void *obj)
98819 {
98820 u32 offset = (obj - slab->s_mem);
98821 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98822@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
98823 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
98824 sizes[INDEX_AC].cs_size,
98825 ARCH_KMALLOC_MINALIGN,
98826- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98827+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98828 NULL);
98829
98830 if (INDEX_AC != INDEX_L3) {
98831@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
98832 kmem_cache_create(names[INDEX_L3].name,
98833 sizes[INDEX_L3].cs_size,
98834 ARCH_KMALLOC_MINALIGN,
98835- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98836+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98837 NULL);
98838 }
98839
98840@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
98841 sizes->cs_cachep = kmem_cache_create(names->name,
98842 sizes->cs_size,
98843 ARCH_KMALLOC_MINALIGN,
98844- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98845+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98846 NULL);
98847 }
98848 #ifdef CONFIG_ZONE_DMA
98849@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
98850 }
98851 /* cpu stats */
98852 {
98853- unsigned long allochit = atomic_read(&cachep->allochit);
98854- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98855- unsigned long freehit = atomic_read(&cachep->freehit);
98856- unsigned long freemiss = atomic_read(&cachep->freemiss);
98857+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98858+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98859+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98860+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98861
98862 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98863 allochit, allocmiss, freehit, freemiss);
98864@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
98865
98866 static int __init slab_proc_init(void)
98867 {
98868- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
98869+ mode_t gr_mode = S_IRUGO;
98870+
98871+#ifdef CONFIG_GRKERNSEC_PROC_ADD
98872+ gr_mode = S_IRUSR;
98873+#endif
98874+
98875+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
98876 #ifdef CONFIG_DEBUG_SLAB_LEAK
98877- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98878+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
98879 #endif
98880 return 0;
98881 }
98882 module_init(slab_proc_init);
98883 #endif
98884
98885+void check_object_size(const void *ptr, unsigned long n, bool to)
98886+{
98887+
98888+#ifdef CONFIG_PAX_USERCOPY
98889+ struct page *page;
98890+ struct kmem_cache *cachep = NULL;
98891+ struct slab *slabp;
98892+ unsigned int objnr;
98893+ unsigned long offset;
98894+ const char *type;
98895+
98896+ if (!n)
98897+ return;
98898+
98899+ type = "<null>";
98900+ if (ZERO_OR_NULL_PTR(ptr))
98901+ goto report;
98902+
98903+ if (!virt_addr_valid(ptr))
98904+ return;
98905+
98906+ page = virt_to_head_page(ptr);
98907+
98908+ type = "<process stack>";
98909+ if (!PageSlab(page)) {
98910+ if (object_is_on_stack(ptr, n) == -1)
98911+ goto report;
98912+ return;
98913+ }
98914+
98915+ cachep = page_get_cache(page);
98916+ type = cachep->name;
98917+ if (!(cachep->flags & SLAB_USERCOPY))
98918+ goto report;
98919+
98920+ slabp = page_get_slab(page);
98921+ objnr = obj_to_index(cachep, slabp, ptr);
98922+ BUG_ON(objnr >= cachep->num);
98923+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
98924+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
98925+ return;
98926+
98927+report:
98928+ pax_report_usercopy(ptr, n, to, type);
98929+#endif
98930+
98931+}
98932+EXPORT_SYMBOL(check_object_size);
98933+
98934 /**
98935 * ksize - get the actual amount of memory allocated for a given object
98936 * @objp: Pointer to the object
98937diff --git a/mm/slob.c b/mm/slob.c
98938index 837ebd6..0bd23bc 100644
98939--- a/mm/slob.c
98940+++ b/mm/slob.c
98941@@ -29,7 +29,7 @@
98942 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
98943 * alloc_pages() directly, allocating compound pages so the page order
98944 * does not have to be separately tracked, and also stores the exact
98945- * allocation size in page->private so that it can be used to accurately
98946+ * allocation size in slob_page->size so that it can be used to accurately
98947 * provide ksize(). These objects are detected in kfree() because slob_page()
98948 * is false for them.
98949 *
98950@@ -58,6 +58,7 @@
98951 */
98952
98953 #include <linux/kernel.h>
98954+#include <linux/sched.h>
98955 #include <linux/slab.h>
98956 #include <linux/mm.h>
98957 #include <linux/swap.h> /* struct reclaim_state */
98958@@ -100,7 +101,8 @@ struct slob_page {
98959 unsigned long flags; /* mandatory */
98960 atomic_t _count; /* mandatory */
98961 slobidx_t units; /* free units left in page */
98962- unsigned long pad[2];
98963+ unsigned long pad[1];
98964+ unsigned long size; /* size when >=PAGE_SIZE */
98965 slob_t *free; /* first free slob_t in page */
98966 struct list_head list; /* linked list of free pages */
98967 };
98968@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
98969 */
98970 static inline int is_slob_page(struct slob_page *sp)
98971 {
98972- return PageSlab((struct page *)sp);
98973+ return PageSlab((struct page *)sp) && !sp->size;
98974 }
98975
98976 static inline void set_slob_page(struct slob_page *sp)
98977@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
98978
98979 static inline struct slob_page *slob_page(const void *addr)
98980 {
98981- return (struct slob_page *)virt_to_page(addr);
98982+ return (struct slob_page *)virt_to_head_page(addr);
98983 }
98984
98985 /*
98986@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
98987 /*
98988 * Return the size of a slob block.
98989 */
98990-static slobidx_t slob_units(slob_t *s)
98991+static slobidx_t slob_units(const slob_t *s)
98992 {
98993 if (s->units > 0)
98994 return s->units;
98995@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
98996 /*
98997 * Return the next free slob block pointer after this one.
98998 */
98999-static slob_t *slob_next(slob_t *s)
99000+static slob_t *slob_next(const slob_t *s)
99001 {
99002 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99003 slobidx_t next;
99004@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
99005 /*
99006 * Returns true if s is the last free block in its page.
99007 */
99008-static int slob_last(slob_t *s)
99009+static int slob_last(const slob_t *s)
99010 {
99011 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99012 }
99013@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99014 if (!page)
99015 return NULL;
99016
99017+ set_slob_page(page);
99018 return page_address(page);
99019 }
99020
99021@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99022 if (!b)
99023 return NULL;
99024 sp = slob_page(b);
99025- set_slob_page(sp);
99026
99027 spin_lock_irqsave(&slob_lock, flags);
99028 sp->units = SLOB_UNITS(PAGE_SIZE);
99029 sp->free = b;
99030+ sp->size = 0;
99031 INIT_LIST_HEAD(&sp->list);
99032 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99033 set_slob_page_free(sp, slob_list);
99034@@ -475,10 +478,9 @@ out:
99035 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
99036 #endif
99037
99038-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99039+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
99040 {
99041- unsigned int *m;
99042- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99043+ slob_t *m;
99044 void *ret;
99045
99046 lockdep_trace_alloc(gfp);
99047@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99048
99049 if (!m)
99050 return NULL;
99051- *m = size;
99052+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99053+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99054+ m[0].units = size;
99055+ m[1].units = align;
99056 ret = (void *)m + align;
99057
99058 trace_kmalloc_node(_RET_IP_, ret,
99059@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99060
99061 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
99062 if (ret) {
99063- struct page *page;
99064- page = virt_to_page(ret);
99065- page->private = size;
99066+ struct slob_page *sp;
99067+ sp = slob_page(ret);
99068+ sp->size = size;
99069 }
99070
99071 trace_kmalloc_node(_RET_IP_, ret,
99072 size, PAGE_SIZE << order, gfp, node);
99073 }
99074
99075- kmemleak_alloc(ret, size, 1, gfp);
99076+ return ret;
99077+}
99078+
99079+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99080+{
99081+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99082+ void *ret = __kmalloc_node_align(size, gfp, node, align);
99083+
99084+ if (!ZERO_OR_NULL_PTR(ret))
99085+ kmemleak_alloc(ret, size, 1, gfp);
99086 return ret;
99087 }
99088 EXPORT_SYMBOL(__kmalloc_node);
99089@@ -528,13 +542,92 @@ void kfree(const void *block)
99090 sp = slob_page(block);
99091 if (is_slob_page(sp)) {
99092 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99093- unsigned int *m = (unsigned int *)(block - align);
99094- slob_free(m, *m + align);
99095- } else
99096+ slob_t *m = (slob_t *)(block - align);
99097+ slob_free(m, m[0].units + align);
99098+ } else {
99099+ clear_slob_page(sp);
99100+ free_slob_page(sp);
99101+ sp->size = 0;
99102 put_page(&sp->page);
99103+ }
99104 }
99105 EXPORT_SYMBOL(kfree);
99106
99107+void check_object_size(const void *ptr, unsigned long n, bool to)
99108+{
99109+
99110+#ifdef CONFIG_PAX_USERCOPY
99111+ struct slob_page *sp;
99112+ const slob_t *free;
99113+ const void *base;
99114+ unsigned long flags;
99115+ const char *type;
99116+
99117+ if (!n)
99118+ return;
99119+
99120+ type = "<null>";
99121+ if (ZERO_OR_NULL_PTR(ptr))
99122+ goto report;
99123+
99124+ if (!virt_addr_valid(ptr))
99125+ return;
99126+
99127+ type = "<process stack>";
99128+ sp = slob_page(ptr);
99129+ if (!PageSlab((struct page *)sp)) {
99130+ if (object_is_on_stack(ptr, n) == -1)
99131+ goto report;
99132+ return;
99133+ }
99134+
99135+ type = "<slob>";
99136+ if (sp->size) {
99137+ base = page_address(&sp->page);
99138+ if (base <= ptr && n <= sp->size - (ptr - base))
99139+ return;
99140+ goto report;
99141+ }
99142+
99143+ /* some tricky double walking to find the chunk */
99144+ spin_lock_irqsave(&slob_lock, flags);
99145+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99146+ free = sp->free;
99147+
99148+ while (!slob_last(free) && (void *)free <= ptr) {
99149+ base = free + slob_units(free);
99150+ free = slob_next(free);
99151+ }
99152+
99153+ while (base < (void *)free) {
99154+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99155+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99156+ int offset;
99157+
99158+ if (ptr < base + align)
99159+ break;
99160+
99161+ offset = ptr - base - align;
99162+ if (offset >= m) {
99163+ base += size;
99164+ continue;
99165+ }
99166+
99167+ if (n > m - offset)
99168+ break;
99169+
99170+ spin_unlock_irqrestore(&slob_lock, flags);
99171+ return;
99172+ }
99173+
99174+ spin_unlock_irqrestore(&slob_lock, flags);
99175+report:
99176+ pax_report_usercopy(ptr, n, to, type);
99177+#endif
99178+
99179+}
99180+EXPORT_SYMBOL(check_object_size);
99181+
99182 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99183 size_t ksize(const void *block)
99184 {
99185@@ -547,10 +640,10 @@ size_t ksize(const void *block)
99186 sp = slob_page(block);
99187 if (is_slob_page(sp)) {
99188 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99189- unsigned int *m = (unsigned int *)(block - align);
99190- return SLOB_UNITS(*m) * SLOB_UNIT;
99191+ slob_t *m = (slob_t *)(block - align);
99192+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99193 } else
99194- return sp->page.private;
99195+ return sp->size;
99196 }
99197 EXPORT_SYMBOL(ksize);
99198
99199@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99200 {
99201 struct kmem_cache *c;
99202
99203+#ifdef CONFIG_PAX_USERCOPY
99204+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
99205+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
99206+#else
99207 c = slob_alloc(sizeof(struct kmem_cache),
99208 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
99209+#endif
99210
99211 if (c) {
99212 c->name = name;
99213@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99214 {
99215 void *b;
99216
99217+#ifdef CONFIG_PAX_USERCOPY
99218+ b = __kmalloc_node_align(c->size, flags, node, c->align);
99219+#else
99220 if (c->size < PAGE_SIZE) {
99221 b = slob_alloc(c->size, flags, c->align, node);
99222 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99223 SLOB_UNITS(c->size) * SLOB_UNIT,
99224 flags, node);
99225 } else {
99226+ struct slob_page *sp;
99227+
99228 b = slob_new_pages(flags, get_order(c->size), node);
99229+ sp = slob_page(b);
99230+ sp->size = c->size;
99231 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99232 PAGE_SIZE << get_order(c->size),
99233 flags, node);
99234 }
99235+#endif
99236
99237 if (c->ctor)
99238 c->ctor(b);
99239@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
99240
99241 static void __kmem_cache_free(void *b, int size)
99242 {
99243- if (size < PAGE_SIZE)
99244+ struct slob_page *sp = slob_page(b);
99245+
99246+ if (is_slob_page(sp))
99247 slob_free(b, size);
99248- else
99249+ else {
99250+ clear_slob_page(sp);
99251+ free_slob_page(sp);
99252+ sp->size = 0;
99253 slob_free_pages(b, get_order(size));
99254+ }
99255 }
99256
99257 static void kmem_rcu_free(struct rcu_head *head)
99258@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
99259
99260 void kmem_cache_free(struct kmem_cache *c, void *b)
99261 {
99262+ int size = c->size;
99263+
99264+#ifdef CONFIG_PAX_USERCOPY
99265+ if (size + c->align < PAGE_SIZE) {
99266+ size += c->align;
99267+ b -= c->align;
99268+ }
99269+#endif
99270+
99271 kmemleak_free_recursive(b, c->flags);
99272 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99273 struct slob_rcu *slob_rcu;
99274- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99275+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99276 INIT_RCU_HEAD(&slob_rcu->head);
99277- slob_rcu->size = c->size;
99278+ slob_rcu->size = size;
99279 call_rcu(&slob_rcu->head, kmem_rcu_free);
99280 } else {
99281- __kmem_cache_free(b, c->size);
99282+ __kmem_cache_free(b, size);
99283 }
99284
99285+#ifdef CONFIG_PAX_USERCOPY
99286+ trace_kfree(_RET_IP_, b);
99287+#else
99288 trace_kmem_cache_free(_RET_IP_, b);
99289+#endif
99290+
99291 }
99292 EXPORT_SYMBOL(kmem_cache_free);
99293
99294diff --git a/mm/slub.c b/mm/slub.c
99295index 4996fc7..87e01d0 100644
99296--- a/mm/slub.c
99297+++ b/mm/slub.c
99298@@ -201,7 +201,7 @@ struct track {
99299
99300 enum track_item { TRACK_ALLOC, TRACK_FREE };
99301
99302-#ifdef CONFIG_SLUB_DEBUG
99303+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99304 static int sysfs_slab_add(struct kmem_cache *);
99305 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99306 static void sysfs_slab_remove(struct kmem_cache *);
99307@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
99308 if (!t->addr)
99309 return;
99310
99311- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99312+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99313 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99314 }
99315
99316@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
99317
99318 page = virt_to_head_page(x);
99319
99320+ BUG_ON(!PageSlab(page));
99321+
99322 slab_free(s, page, x, _RET_IP_);
99323
99324 trace_kmem_cache_free(_RET_IP_, x);
99325@@ -1937,7 +1939,7 @@ static int slub_min_objects;
99326 * Merge control. If this is set then no merging of slab caches will occur.
99327 * (Could be removed. This was introduced to pacify the merge skeptics.)
99328 */
99329-static int slub_nomerge;
99330+static int slub_nomerge = 1;
99331
99332 /*
99333 * Calculate the order of allocation given an slab object size.
99334@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
99335 * list to avoid pounding the page allocator excessively.
99336 */
99337 set_min_partial(s, ilog2(s->size));
99338- s->refcount = 1;
99339+ atomic_set(&s->refcount, 1);
99340 #ifdef CONFIG_NUMA
99341 s->remote_node_defrag_ratio = 1000;
99342 #endif
99343@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
99344 void kmem_cache_destroy(struct kmem_cache *s)
99345 {
99346 down_write(&slub_lock);
99347- s->refcount--;
99348- if (!s->refcount) {
99349+ if (atomic_dec_and_test(&s->refcount)) {
99350 list_del(&s->list);
99351 up_write(&slub_lock);
99352 if (kmem_cache_close(s)) {
99353@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
99354 __setup("slub_nomerge", setup_slub_nomerge);
99355
99356 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
99357- const char *name, int size, gfp_t gfp_flags)
99358+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
99359 {
99360- unsigned int flags = 0;
99361-
99362 if (gfp_flags & SLUB_DMA)
99363- flags = SLAB_CACHE_DMA;
99364+ flags |= SLAB_CACHE_DMA;
99365
99366 /*
99367 * This function is called with IRQs disabled during early-boot on
99368@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99369 EXPORT_SYMBOL(__kmalloc_node);
99370 #endif
99371
99372+void check_object_size(const void *ptr, unsigned long n, bool to)
99373+{
99374+
99375+#ifdef CONFIG_PAX_USERCOPY
99376+ struct page *page;
99377+ struct kmem_cache *s = NULL;
99378+ unsigned long offset;
99379+ const char *type;
99380+
99381+ if (!n)
99382+ return;
99383+
99384+ type = "<null>";
99385+ if (ZERO_OR_NULL_PTR(ptr))
99386+ goto report;
99387+
99388+ if (!virt_addr_valid(ptr))
99389+ return;
99390+
99391+ page = get_object_page(ptr);
99392+
99393+ type = "<process stack>";
99394+ if (!page) {
99395+ if (object_is_on_stack(ptr, n) == -1)
99396+ goto report;
99397+ return;
99398+ }
99399+
99400+ s = page->slab;
99401+ type = s->name;
99402+ if (!(s->flags & SLAB_USERCOPY))
99403+ goto report;
99404+
99405+ offset = (ptr - page_address(page)) % s->size;
99406+ if (offset <= s->objsize && n <= s->objsize - offset)
99407+ return;
99408+
99409+report:
99410+ pax_report_usercopy(ptr, n, to, type);
99411+#endif
99412+
99413+}
99414+EXPORT_SYMBOL(check_object_size);
99415+
99416 size_t ksize(const void *object)
99417 {
99418 struct page *page;
99419@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
99420 * kmem_cache_open for slab_state == DOWN.
99421 */
99422 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
99423- sizeof(struct kmem_cache_node), GFP_NOWAIT);
99424- kmalloc_caches[0].refcount = -1;
99425+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
99426+ atomic_set(&kmalloc_caches[0].refcount, -1);
99427 caches++;
99428
99429 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
99430@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
99431 /* Caches that are not of the two-to-the-power-of size */
99432 if (KMALLOC_MIN_SIZE <= 32) {
99433 create_kmalloc_cache(&kmalloc_caches[1],
99434- "kmalloc-96", 96, GFP_NOWAIT);
99435+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
99436 caches++;
99437 }
99438 if (KMALLOC_MIN_SIZE <= 64) {
99439 create_kmalloc_cache(&kmalloc_caches[2],
99440- "kmalloc-192", 192, GFP_NOWAIT);
99441+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
99442 caches++;
99443 }
99444
99445 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
99446 create_kmalloc_cache(&kmalloc_caches[i],
99447- "kmalloc", 1 << i, GFP_NOWAIT);
99448+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
99449 caches++;
99450 }
99451
99452@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
99453 /*
99454 * We may have set a slab to be unmergeable during bootstrap.
99455 */
99456- if (s->refcount < 0)
99457+ if (atomic_read(&s->refcount) < 0)
99458 return 1;
99459
99460 return 0;
99461@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99462 if (s) {
99463 int cpu;
99464
99465- s->refcount++;
99466+ atomic_inc(&s->refcount);
99467 /*
99468 * Adjust the object sizes so that we clear
99469 * the complete object on kzalloc.
99470@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99471
99472 if (sysfs_slab_alias(s, name)) {
99473 down_write(&slub_lock);
99474- s->refcount--;
99475+ atomic_dec(&s->refcount);
99476 up_write(&slub_lock);
99477 goto err;
99478 }
99479@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
99480
99481 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99482 {
99483- return sprintf(buf, "%d\n", s->refcount - 1);
99484+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
99485 }
99486 SLAB_ATTR_RO(aliases);
99487
99488@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
99489 kfree(s);
99490 }
99491
99492-static struct sysfs_ops slab_sysfs_ops = {
99493+static const struct sysfs_ops slab_sysfs_ops = {
99494 .show = slab_attr_show,
99495 .store = slab_attr_store,
99496 };
99497@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
99498 return 0;
99499 }
99500
99501-static struct kset_uevent_ops slab_uevent_ops = {
99502+static const struct kset_uevent_ops slab_uevent_ops = {
99503 .filter = uevent_filter,
99504 };
99505
99506@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
99507 return name;
99508 }
99509
99510+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99511 static int sysfs_slab_add(struct kmem_cache *s)
99512 {
99513 int err;
99514@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
99515 kobject_del(&s->kobj);
99516 kobject_put(&s->kobj);
99517 }
99518+#endif
99519
99520 /*
99521 * Need to buffer aliases during bootup until sysfs becomes
99522@@ -4632,6 +4677,7 @@ struct saved_alias {
99523
99524 static struct saved_alias *alias_list;
99525
99526+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99527 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99528 {
99529 struct saved_alias *al;
99530@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99531 alias_list = al;
99532 return 0;
99533 }
99534+#endif
99535
99536 static int __init slab_sysfs_init(void)
99537 {
99538@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
99539
99540 static int __init slab_proc_init(void)
99541 {
99542- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
99543+ mode_t gr_mode = S_IRUGO;
99544+
99545+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99546+ gr_mode = S_IRUSR;
99547+#endif
99548+
99549+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
99550 return 0;
99551 }
99552 module_init(slab_proc_init);
99553diff --git a/mm/swap.c b/mm/swap.c
99554index 308e57d..5de19c0 100644
99555--- a/mm/swap.c
99556+++ b/mm/swap.c
99557@@ -30,6 +30,7 @@
99558 #include <linux/notifier.h>
99559 #include <linux/backing-dev.h>
99560 #include <linux/memcontrol.h>
99561+#include <linux/hugetlb.h>
99562
99563 #include "internal.h"
99564
99565@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
99566 compound_page_dtor *dtor;
99567
99568 dtor = get_compound_page_dtor(page);
99569+ if (!PageHuge(page))
99570+ BUG_ON(dtor != free_compound_page);
99571 (*dtor)(page);
99572 }
99573 }
99574diff --git a/mm/util.c b/mm/util.c
99575index e48b493..24a601d 100644
99576--- a/mm/util.c
99577+++ b/mm/util.c
99578@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
99579 void arch_pick_mmap_layout(struct mm_struct *mm)
99580 {
99581 mm->mmap_base = TASK_UNMAPPED_BASE;
99582+
99583+#ifdef CONFIG_PAX_RANDMMAP
99584+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99585+ mm->mmap_base += mm->delta_mmap;
99586+#endif
99587+
99588 mm->get_unmapped_area = arch_get_unmapped_area;
99589 mm->unmap_area = arch_unmap_area;
99590 }
99591diff --git a/mm/vmalloc.c b/mm/vmalloc.c
99592index f34ffd0..90d7407 100644
99593--- a/mm/vmalloc.c
99594+++ b/mm/vmalloc.c
99595@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
99596
99597 pte = pte_offset_kernel(pmd, addr);
99598 do {
99599- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99600- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99601+
99602+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99603+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
99604+ BUG_ON(!pte_exec(*pte));
99605+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
99606+ continue;
99607+ }
99608+#endif
99609+
99610+ {
99611+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99612+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99613+ }
99614 } while (pte++, addr += PAGE_SIZE, addr != end);
99615 }
99616
99617@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99618 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
99619 {
99620 pte_t *pte;
99621+ int ret = -ENOMEM;
99622
99623 /*
99624 * nr is a running index into the array which helps higher level
99625@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99626 pte = pte_alloc_kernel(pmd, addr);
99627 if (!pte)
99628 return -ENOMEM;
99629+
99630+ pax_open_kernel();
99631 do {
99632 struct page *page = pages[*nr];
99633
99634- if (WARN_ON(!pte_none(*pte)))
99635- return -EBUSY;
99636- if (WARN_ON(!page))
99637- return -ENOMEM;
99638+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99639+ if (!(pgprot_val(prot) & _PAGE_NX))
99640+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
99641+ else
99642+#endif
99643+
99644+ if (WARN_ON(!pte_none(*pte))) {
99645+ ret = -EBUSY;
99646+ goto out;
99647+ }
99648+ if (WARN_ON(!page)) {
99649+ ret = -ENOMEM;
99650+ goto out;
99651+ }
99652 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
99653 (*nr)++;
99654 } while (pte++, addr += PAGE_SIZE, addr != end);
99655- return 0;
99656+ ret = 0;
99657+out:
99658+ pax_close_kernel();
99659+ return ret;
99660 }
99661
99662 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
99663@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
99664 * and fall back on vmalloc() if that fails. Others
99665 * just put it in the vmalloc space.
99666 */
99667-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
99668+#ifdef CONFIG_MODULES
99669+#ifdef MODULES_VADDR
99670 unsigned long addr = (unsigned long)x;
99671 if (addr >= MODULES_VADDR && addr < MODULES_END)
99672 return 1;
99673 #endif
99674+
99675+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99676+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
99677+ return 1;
99678+#endif
99679+
99680+#endif
99681+
99682 return is_vmalloc_addr(x);
99683 }
99684
99685@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
99686
99687 if (!pgd_none(*pgd)) {
99688 pud_t *pud = pud_offset(pgd, addr);
99689+#ifdef CONFIG_X86
99690+ if (!pud_large(*pud))
99691+#endif
99692 if (!pud_none(*pud)) {
99693 pmd_t *pmd = pmd_offset(pud, addr);
99694+#ifdef CONFIG_X86
99695+ if (!pmd_large(*pmd))
99696+#endif
99697 if (!pmd_none(*pmd)) {
99698 pte_t *ptep, pte;
99699
99700@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
99701 struct rb_node *tmp;
99702
99703 while (*p) {
99704- struct vmap_area *tmp;
99705+ struct vmap_area *varea;
99706
99707 parent = *p;
99708- tmp = rb_entry(parent, struct vmap_area, rb_node);
99709- if (va->va_start < tmp->va_end)
99710+ varea = rb_entry(parent, struct vmap_area, rb_node);
99711+ if (va->va_start < varea->va_end)
99712 p = &(*p)->rb_left;
99713- else if (va->va_end > tmp->va_start)
99714+ else if (va->va_end > varea->va_start)
99715 p = &(*p)->rb_right;
99716 else
99717 BUG();
99718@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
99719 struct vm_struct *area;
99720
99721 BUG_ON(in_interrupt());
99722+
99723+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99724+ if (flags & VM_KERNEXEC) {
99725+ if (start != VMALLOC_START || end != VMALLOC_END)
99726+ return NULL;
99727+ start = (unsigned long)MODULES_EXEC_VADDR;
99728+ end = (unsigned long)MODULES_EXEC_END;
99729+ }
99730+#endif
99731+
99732 if (flags & VM_IOREMAP) {
99733 int bit = fls(size);
99734
99735@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
99736 if (count > totalram_pages)
99737 return NULL;
99738
99739+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99740+ if (!(pgprot_val(prot) & _PAGE_NX))
99741+ flags |= VM_KERNEXEC;
99742+#endif
99743+
99744 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
99745 __builtin_return_address(0));
99746 if (!area)
99747@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
99748 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
99749 return NULL;
99750
99751+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99752+ if (!(pgprot_val(prot) & _PAGE_NX))
99753+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
99754+ VMALLOC_START, VMALLOC_END, node,
99755+ gfp_mask, caller);
99756+ else
99757+#endif
99758+
99759 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
99760 VMALLOC_START, VMALLOC_END, node,
99761 gfp_mask, caller);
99762@@ -1698,10 +1763,9 @@ EXPORT_SYMBOL(vmalloc_node);
99763 * For tight control over page level allocator and protection flags
99764 * use __vmalloc() instead.
99765 */
99766-
99767 void *vmalloc_exec(unsigned long size)
99768 {
99769- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
99770+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
99771 -1, __builtin_return_address(0));
99772 }
99773
99774@@ -1998,6 +2062,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
99775 unsigned long uaddr = vma->vm_start;
99776 unsigned long usize = vma->vm_end - vma->vm_start;
99777
99778+ BUG_ON(vma->vm_mirror);
99779+
99780 if ((PAGE_SIZE-1) & (unsigned long)addr)
99781 return -EINVAL;
99782
99783diff --git a/mm/vmstat.c b/mm/vmstat.c
99784index 42d76c6..5643dc4 100644
99785--- a/mm/vmstat.c
99786+++ b/mm/vmstat.c
99787@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
99788 *
99789 * vm_stat contains the global counters
99790 */
99791-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99792+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99793 EXPORT_SYMBOL(vm_stat);
99794
99795 #ifdef CONFIG_SMP
99796@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
99797 v = p->vm_stat_diff[i];
99798 p->vm_stat_diff[i] = 0;
99799 local_irq_restore(flags);
99800- atomic_long_add(v, &zone->vm_stat[i]);
99801+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
99802 global_diff[i] += v;
99803 #ifdef CONFIG_NUMA
99804 /* 3 seconds idle till flush */
99805@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
99806
99807 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
99808 if (global_diff[i])
99809- atomic_long_add(global_diff[i], &vm_stat[i]);
99810+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
99811 }
99812
99813 #endif
99814@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
99815 start_cpu_timer(cpu);
99816 #endif
99817 #ifdef CONFIG_PROC_FS
99818- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
99819- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
99820- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
99821- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
99822+ {
99823+ mode_t gr_mode = S_IRUGO;
99824+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99825+ gr_mode = S_IRUSR;
99826+#endif
99827+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
99828+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
99829+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
99830+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
99831+#else
99832+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
99833+#endif
99834+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
99835+ }
99836 #endif
99837 return 0;
99838 }
99839diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
99840index a29c5ab..6143f20 100644
99841--- a/net/8021q/vlan.c
99842+++ b/net/8021q/vlan.c
99843@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
99844 err = -EPERM;
99845 if (!capable(CAP_NET_ADMIN))
99846 break;
99847- if ((args.u.name_type >= 0) &&
99848- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
99849+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
99850 struct vlan_net *vn;
99851
99852 vn = net_generic(net, vlan_net_id);
99853diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
99854index a2d2984..f9eb711 100644
99855--- a/net/9p/trans_fd.c
99856+++ b/net/9p/trans_fd.c
99857@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
99858 oldfs = get_fs();
99859 set_fs(get_ds());
99860 /* The cast to a user pointer is valid due to the set_fs() */
99861- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
99862+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
99863 set_fs(oldfs);
99864
99865 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
99866diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
99867index 02cc7e7..4514f1b 100644
99868--- a/net/atm/atm_misc.c
99869+++ b/net/atm/atm_misc.c
99870@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
99871 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
99872 return 1;
99873 atm_return(vcc,truesize);
99874- atomic_inc(&vcc->stats->rx_drop);
99875+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99876 return 0;
99877 }
99878
99879@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
99880 }
99881 }
99882 atm_return(vcc,guess);
99883- atomic_inc(&vcc->stats->rx_drop);
99884+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99885 return NULL;
99886 }
99887
99888@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
99889
99890 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99891 {
99892-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99893+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99894 __SONET_ITEMS
99895 #undef __HANDLE_ITEM
99896 }
99897@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99898
99899 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99900 {
99901-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
99902+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
99903 __SONET_ITEMS
99904 #undef __HANDLE_ITEM
99905 }
99906diff --git a/net/atm/lec.h b/net/atm/lec.h
99907index 9d14d19..5c145f3 100644
99908--- a/net/atm/lec.h
99909+++ b/net/atm/lec.h
99910@@ -48,7 +48,7 @@ struct lane2_ops {
99911 const u8 *tlvs, u32 sizeoftlvs);
99912 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
99913 const u8 *tlvs, u32 sizeoftlvs);
99914-};
99915+} __no_const;
99916
99917 /*
99918 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
99919diff --git a/net/atm/mpc.h b/net/atm/mpc.h
99920index 0919a88..a23d54e 100644
99921--- a/net/atm/mpc.h
99922+++ b/net/atm/mpc.h
99923@@ -33,7 +33,7 @@ struct mpoa_client {
99924 struct mpc_parameters parameters; /* parameters for this client */
99925
99926 const struct net_device_ops *old_ops;
99927- struct net_device_ops new_ops;
99928+ net_device_ops_no_const new_ops;
99929 };
99930
99931
99932diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
99933index 4504a4b..1733f1e 100644
99934--- a/net/atm/mpoa_caches.c
99935+++ b/net/atm/mpoa_caches.c
99936@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
99937 struct timeval now;
99938 struct k_message msg;
99939
99940+ pax_track_stack();
99941+
99942 do_gettimeofday(&now);
99943
99944 write_lock_irq(&client->egress_lock);
99945diff --git a/net/atm/proc.c b/net/atm/proc.c
99946index ab8419a..aa91497 100644
99947--- a/net/atm/proc.c
99948+++ b/net/atm/proc.c
99949@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
99950 const struct k_atm_aal_stats *stats)
99951 {
99952 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
99953- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
99954- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
99955- atomic_read(&stats->rx_drop));
99956+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
99957+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
99958+ atomic_read_unchecked(&stats->rx_drop));
99959 }
99960
99961 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
99962@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
99963 {
99964 struct sock *sk = sk_atm(vcc);
99965
99966+#ifdef CONFIG_GRKERNSEC_HIDESYM
99967+ seq_printf(seq, "%p ", NULL);
99968+#else
99969 seq_printf(seq, "%p ", vcc);
99970+#endif
99971+
99972 if (!vcc->dev)
99973 seq_printf(seq, "Unassigned ");
99974 else
99975@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
99976 {
99977 if (!vcc->dev)
99978 seq_printf(seq, sizeof(void *) == 4 ?
99979+#ifdef CONFIG_GRKERNSEC_HIDESYM
99980+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
99981+#else
99982 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
99983+#endif
99984 else
99985 seq_printf(seq, "%3d %3d %5d ",
99986 vcc->dev->number, vcc->vpi, vcc->vci);
99987diff --git a/net/atm/resources.c b/net/atm/resources.c
99988index 56b7322..c48b84e 100644
99989--- a/net/atm/resources.c
99990+++ b/net/atm/resources.c
99991@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
99992 static void copy_aal_stats(struct k_atm_aal_stats *from,
99993 struct atm_aal_stats *to)
99994 {
99995-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99996+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99997 __AAL_STAT_ITEMS
99998 #undef __HANDLE_ITEM
99999 }
100000@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100001 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100002 struct atm_aal_stats *to)
100003 {
100004-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100005+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100006 __AAL_STAT_ITEMS
100007 #undef __HANDLE_ITEM
100008 }
100009diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
100010index 8567d47..bba2292 100644
100011--- a/net/bridge/br_private.h
100012+++ b/net/bridge/br_private.h
100013@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
100014
100015 #ifdef CONFIG_SYSFS
100016 /* br_sysfs_if.c */
100017-extern struct sysfs_ops brport_sysfs_ops;
100018+extern const struct sysfs_ops brport_sysfs_ops;
100019 extern int br_sysfs_addif(struct net_bridge_port *p);
100020
100021 /* br_sysfs_br.c */
100022diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
100023index 9a52ac5..c97538e 100644
100024--- a/net/bridge/br_stp_if.c
100025+++ b/net/bridge/br_stp_if.c
100026@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
100027 char *envp[] = { NULL };
100028
100029 if (br->stp_enabled == BR_USER_STP) {
100030- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
100031+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
100032 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
100033 br->dev->name, r);
100034
100035diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
100036index 820643a..ce77fb3 100644
100037--- a/net/bridge/br_sysfs_if.c
100038+++ b/net/bridge/br_sysfs_if.c
100039@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
100040 return ret;
100041 }
100042
100043-struct sysfs_ops brport_sysfs_ops = {
100044+const struct sysfs_ops brport_sysfs_ops = {
100045 .show = brport_show,
100046 .store = brport_store,
100047 };
100048diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
100049index d73d47f..72df42a 100644
100050--- a/net/bridge/netfilter/ebtables.c
100051+++ b/net/bridge/netfilter/ebtables.c
100052@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
100053 unsigned int entries_size, nentries;
100054 char *entries;
100055
100056+ pax_track_stack();
100057+
100058 if (cmd == EBT_SO_GET_ENTRIES) {
100059 entries_size = t->private->entries_size;
100060 nentries = t->private->nentries;
100061diff --git a/net/can/bcm.c b/net/can/bcm.c
100062index 2ffd2e0..72a7486 100644
100063--- a/net/can/bcm.c
100064+++ b/net/can/bcm.c
100065@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
100066 struct bcm_sock *bo = bcm_sk(sk);
100067 struct bcm_op *op;
100068
100069+#ifdef CONFIG_GRKERNSEC_HIDESYM
100070+ seq_printf(m, ">>> socket %p", NULL);
100071+ seq_printf(m, " / sk %p", NULL);
100072+ seq_printf(m, " / bo %p", NULL);
100073+#else
100074 seq_printf(m, ">>> socket %p", sk->sk_socket);
100075 seq_printf(m, " / sk %p", sk);
100076 seq_printf(m, " / bo %p", bo);
100077+#endif
100078 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
100079 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
100080 seq_printf(m, " <<<\n");
100081diff --git a/net/compat.c b/net/compat.c
100082index 9559afc..ccd74e1 100644
100083--- a/net/compat.c
100084+++ b/net/compat.c
100085@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
100086 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
100087 __get_user(kmsg->msg_flags, &umsg->msg_flags))
100088 return -EFAULT;
100089- kmsg->msg_name = compat_ptr(tmp1);
100090- kmsg->msg_iov = compat_ptr(tmp2);
100091- kmsg->msg_control = compat_ptr(tmp3);
100092+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
100093+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
100094+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
100095 return 0;
100096 }
100097
100098@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
100099 kern_msg->msg_name = NULL;
100100
100101 tot_len = iov_from_user_compat_to_kern(kern_iov,
100102- (struct compat_iovec __user *)kern_msg->msg_iov,
100103+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
100104 kern_msg->msg_iovlen);
100105 if (tot_len >= 0)
100106 kern_msg->msg_iov = kern_iov;
100107@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
100108
100109 #define CMSG_COMPAT_FIRSTHDR(msg) \
100110 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
100111- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
100112+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
100113 (struct compat_cmsghdr __user *)NULL)
100114
100115 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
100116 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
100117 (ucmlen) <= (unsigned long) \
100118 ((mhdr)->msg_controllen - \
100119- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
100120+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
100121
100122 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
100123 struct compat_cmsghdr __user *cmsg, int cmsg_len)
100124 {
100125 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
100126- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
100127+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
100128 msg->msg_controllen)
100129 return NULL;
100130 return (struct compat_cmsghdr __user *)ptr;
100131@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
100132 {
100133 struct compat_timeval ctv;
100134 struct compat_timespec cts[3];
100135- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
100136+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
100137 struct compat_cmsghdr cmhdr;
100138 int cmlen;
100139
100140@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
100141
100142 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
100143 {
100144- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
100145+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
100146 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
100147 int fdnum = scm->fp->count;
100148 struct file **fp = scm->fp->fp;
100149@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
100150 len = sizeof(ktime);
100151 old_fs = get_fs();
100152 set_fs(KERNEL_DS);
100153- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
100154+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
100155 set_fs(old_fs);
100156
100157 if (!err) {
100158@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100159 case MCAST_JOIN_GROUP:
100160 case MCAST_LEAVE_GROUP:
100161 {
100162- struct compat_group_req __user *gr32 = (void *)optval;
100163+ struct compat_group_req __user *gr32 = (void __user *)optval;
100164 struct group_req __user *kgr =
100165 compat_alloc_user_space(sizeof(struct group_req));
100166 u32 interface;
100167@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100168 case MCAST_BLOCK_SOURCE:
100169 case MCAST_UNBLOCK_SOURCE:
100170 {
100171- struct compat_group_source_req __user *gsr32 = (void *)optval;
100172+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
100173 struct group_source_req __user *kgsr = compat_alloc_user_space(
100174 sizeof(struct group_source_req));
100175 u32 interface;
100176@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100177 }
100178 case MCAST_MSFILTER:
100179 {
100180- struct compat_group_filter __user *gf32 = (void *)optval;
100181+ struct compat_group_filter __user *gf32 = (void __user *)optval;
100182 struct group_filter __user *kgf;
100183 u32 interface, fmode, numsrc;
100184
100185diff --git a/net/core/dev.c b/net/core/dev.c
100186index 84a0705..575db4c 100644
100187--- a/net/core/dev.c
100188+++ b/net/core/dev.c
100189@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
100190 if (no_module && capable(CAP_NET_ADMIN))
100191 no_module = request_module("netdev-%s", name);
100192 if (no_module && capable(CAP_SYS_MODULE)) {
100193+#ifdef CONFIG_GRKERNSEC_MODHARDEN
100194+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
100195+#else
100196 if (!request_module("%s", name))
100197 pr_err("Loading kernel module for a network device "
100198 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
100199 "instead\n", name);
100200+#endif
100201 }
100202 }
100203 EXPORT_SYMBOL(dev_load);
100204@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
100205
100206 struct dev_gso_cb {
100207 void (*destructor)(struct sk_buff *skb);
100208-};
100209+} __no_const;
100210
100211 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
100212
100213@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
100214 }
100215 EXPORT_SYMBOL(netif_rx_ni);
100216
100217-static void net_tx_action(struct softirq_action *h)
100218+static void net_tx_action(void)
100219 {
100220 struct softnet_data *sd = &__get_cpu_var(softnet_data);
100221
100222@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
100223 EXPORT_SYMBOL(netif_napi_del);
100224
100225
100226-static void net_rx_action(struct softirq_action *h)
100227+static void net_rx_action(void)
100228 {
100229 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
100230 unsigned long time_limit = jiffies + 2;
100231diff --git a/net/core/flow.c b/net/core/flow.c
100232index 9601587..8c4824e 100644
100233--- a/net/core/flow.c
100234+++ b/net/core/flow.c
100235@@ -35,11 +35,11 @@ struct flow_cache_entry {
100236 atomic_t *object_ref;
100237 };
100238
100239-atomic_t flow_cache_genid = ATOMIC_INIT(0);
100240+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
100241
100242 static u32 flow_hash_shift;
100243 #define flow_hash_size (1 << flow_hash_shift)
100244-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
100245+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
100246
100247 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
100248
100249@@ -52,7 +52,7 @@ struct flow_percpu_info {
100250 u32 hash_rnd;
100251 int count;
100252 };
100253-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
100254+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
100255
100256 #define flow_hash_rnd_recalc(cpu) \
100257 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
100258@@ -69,7 +69,7 @@ struct flow_flush_info {
100259 atomic_t cpuleft;
100260 struct completion completion;
100261 };
100262-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
100263+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
100264
100265 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
100266
100267@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
100268 if (fle->family == family &&
100269 fle->dir == dir &&
100270 flow_key_compare(key, &fle->key) == 0) {
100271- if (fle->genid == atomic_read(&flow_cache_genid)) {
100272+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
100273 void *ret = fle->object;
100274
100275 if (ret)
100276@@ -228,7 +228,7 @@ nocache:
100277 err = resolver(net, key, family, dir, &obj, &obj_ref);
100278
100279 if (fle && !err) {
100280- fle->genid = atomic_read(&flow_cache_genid);
100281+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
100282
100283 if (fle->object)
100284 atomic_dec(fle->object_ref);
100285@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
100286
100287 fle = flow_table(cpu)[i];
100288 for (; fle; fle = fle->next) {
100289- unsigned genid = atomic_read(&flow_cache_genid);
100290+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
100291
100292 if (!fle->object || fle->genid == genid)
100293 continue;
100294diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
100295index d4fd895..ac9b1e6 100644
100296--- a/net/core/rtnetlink.c
100297+++ b/net/core/rtnetlink.c
100298@@ -57,7 +57,7 @@ struct rtnl_link
100299 {
100300 rtnl_doit_func doit;
100301 rtnl_dumpit_func dumpit;
100302-};
100303+} __no_const;
100304
100305 static DEFINE_MUTEX(rtnl_mutex);
100306
100307diff --git a/net/core/scm.c b/net/core/scm.c
100308index d98eafc..1a190a9 100644
100309--- a/net/core/scm.c
100310+++ b/net/core/scm.c
100311@@ -191,7 +191,7 @@ error:
100312 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100313 {
100314 struct cmsghdr __user *cm
100315- = (__force struct cmsghdr __user *)msg->msg_control;
100316+ = (struct cmsghdr __force_user *)msg->msg_control;
100317 struct cmsghdr cmhdr;
100318 int cmlen = CMSG_LEN(len);
100319 int err;
100320@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100321 err = -EFAULT;
100322 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
100323 goto out;
100324- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
100325+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
100326 goto out;
100327 cmlen = CMSG_SPACE(len);
100328 if (msg->msg_controllen < cmlen)
100329@@ -229,7 +229,7 @@ out:
100330 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100331 {
100332 struct cmsghdr __user *cm
100333- = (__force struct cmsghdr __user*)msg->msg_control;
100334+ = (struct cmsghdr __force_user *)msg->msg_control;
100335
100336 int fdmax = 0;
100337 int fdnum = scm->fp->count;
100338@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100339 if (fdnum < fdmax)
100340 fdmax = fdnum;
100341
100342- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
100343+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
100344 i++, cmfptr++)
100345 {
100346 int new_fd;
100347diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
100348index 45329d7..626aaa6 100644
100349--- a/net/core/secure_seq.c
100350+++ b/net/core/secure_seq.c
100351@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
100352 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
100353
100354 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100355- __be16 dport)
100356+ __be16 dport)
100357 {
100358 u32 secret[MD5_MESSAGE_BYTES / 4];
100359 u32 hash[MD5_DIGEST_WORDS];
100360@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100361 secret[i] = net_secret[i];
100362
100363 md5_transform(hash, secret);
100364-
100365 return hash[0];
100366 }
100367 #endif
100368diff --git a/net/core/skbuff.c b/net/core/skbuff.c
100369index 025f924..70a71c4 100644
100370--- a/net/core/skbuff.c
100371+++ b/net/core/skbuff.c
100372@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
100373 struct sk_buff *frag_iter;
100374 struct sock *sk = skb->sk;
100375
100376+ pax_track_stack();
100377+
100378 /*
100379 * __skb_splice_bits() only fails if the output has no room left,
100380 * so no point in going over the frag_list for the error case.
100381diff --git a/net/core/sock.c b/net/core/sock.c
100382index 6605e75..3acebda 100644
100383--- a/net/core/sock.c
100384+++ b/net/core/sock.c
100385@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
100386 break;
100387
100388 case SO_PEERCRED:
100389+ {
100390+ struct ucred peercred;
100391 if (len > sizeof(sk->sk_peercred))
100392 len = sizeof(sk->sk_peercred);
100393- if (copy_to_user(optval, &sk->sk_peercred, len))
100394+ peercred = sk->sk_peercred;
100395+ if (copy_to_user(optval, &peercred, len))
100396 return -EFAULT;
100397 goto lenout;
100398+ }
100399
100400 case SO_PEERNAME:
100401 {
100402@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
100403 */
100404 smp_wmb();
100405 atomic_set(&sk->sk_refcnt, 1);
100406- atomic_set(&sk->sk_drops, 0);
100407+ atomic_set_unchecked(&sk->sk_drops, 0);
100408 }
100409 EXPORT_SYMBOL(sock_init_data);
100410
100411diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
100412index 2036568..c55883d 100644
100413--- a/net/decnet/sysctl_net_decnet.c
100414+++ b/net/decnet/sysctl_net_decnet.c
100415@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
100416
100417 if (len > *lenp) len = *lenp;
100418
100419- if (copy_to_user(buffer, addr, len))
100420+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
100421 return -EFAULT;
100422
100423 *lenp = len;
100424@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
100425
100426 if (len > *lenp) len = *lenp;
100427
100428- if (copy_to_user(buffer, devname, len))
100429+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
100430 return -EFAULT;
100431
100432 *lenp = len;
100433diff --git a/net/econet/Kconfig b/net/econet/Kconfig
100434index 39a2d29..f39c0fe 100644
100435--- a/net/econet/Kconfig
100436+++ b/net/econet/Kconfig
100437@@ -4,7 +4,7 @@
100438
100439 config ECONET
100440 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
100441- depends on EXPERIMENTAL && INET
100442+ depends on EXPERIMENTAL && INET && BROKEN
100443 ---help---
100444 Econet is a fairly old and slow networking protocol mainly used by
100445 Acorn computers to access file and print servers. It uses native
100446diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
100447index a413b1b..380849c 100644
100448--- a/net/ieee802154/dgram.c
100449+++ b/net/ieee802154/dgram.c
100450@@ -318,7 +318,7 @@ out:
100451 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
100452 {
100453 if (sock_queue_rcv_skb(sk, skb) < 0) {
100454- atomic_inc(&sk->sk_drops);
100455+ atomic_inc_unchecked(&sk->sk_drops);
100456 kfree_skb(skb);
100457 return NET_RX_DROP;
100458 }
100459diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
100460index 30e74ee..bfc6ee0 100644
100461--- a/net/ieee802154/raw.c
100462+++ b/net/ieee802154/raw.c
100463@@ -206,7 +206,7 @@ out:
100464 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
100465 {
100466 if (sock_queue_rcv_skb(sk, skb) < 0) {
100467- atomic_inc(&sk->sk_drops);
100468+ atomic_inc_unchecked(&sk->sk_drops);
100469 kfree_skb(skb);
100470 return NET_RX_DROP;
100471 }
100472diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
100473index dba56d2..acee5d6 100644
100474--- a/net/ipv4/inet_diag.c
100475+++ b/net/ipv4/inet_diag.c
100476@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
100477 r->idiag_retrans = 0;
100478
100479 r->id.idiag_if = sk->sk_bound_dev_if;
100480+#ifdef CONFIG_GRKERNSEC_HIDESYM
100481+ r->id.idiag_cookie[0] = 0;
100482+ r->id.idiag_cookie[1] = 0;
100483+#else
100484 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
100485 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
100486+#endif
100487
100488 r->id.idiag_sport = inet->sport;
100489 r->id.idiag_dport = inet->dport;
100490@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
100491 r->idiag_family = tw->tw_family;
100492 r->idiag_retrans = 0;
100493 r->id.idiag_if = tw->tw_bound_dev_if;
100494+
100495+#ifdef CONFIG_GRKERNSEC_HIDESYM
100496+ r->id.idiag_cookie[0] = 0;
100497+ r->id.idiag_cookie[1] = 0;
100498+#else
100499 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
100500 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
100501+#endif
100502+
100503 r->id.idiag_sport = tw->tw_sport;
100504 r->id.idiag_dport = tw->tw_dport;
100505 r->id.idiag_src[0] = tw->tw_rcv_saddr;
100506@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
100507 if (sk == NULL)
100508 goto unlock;
100509
100510+#ifndef CONFIG_GRKERNSEC_HIDESYM
100511 err = -ESTALE;
100512 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
100513 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
100514 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
100515 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
100516 goto out;
100517+#endif
100518
100519 err = -ENOMEM;
100520 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
100521@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
100522 r->idiag_retrans = req->retrans;
100523
100524 r->id.idiag_if = sk->sk_bound_dev_if;
100525+
100526+#ifdef CONFIG_GRKERNSEC_HIDESYM
100527+ r->id.idiag_cookie[0] = 0;
100528+ r->id.idiag_cookie[1] = 0;
100529+#else
100530 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
100531 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
100532+#endif
100533
100534 tmo = req->expires - jiffies;
100535 if (tmo < 0)
100536diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
100537index d717267..56de7e7 100644
100538--- a/net/ipv4/inet_hashtables.c
100539+++ b/net/ipv4/inet_hashtables.c
100540@@ -18,12 +18,15 @@
100541 #include <linux/sched.h>
100542 #include <linux/slab.h>
100543 #include <linux/wait.h>
100544+#include <linux/security.h>
100545
100546 #include <net/inet_connection_sock.h>
100547 #include <net/inet_hashtables.h>
100548 #include <net/secure_seq.h>
100549 #include <net/ip.h>
100550
100551+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
100552+
100553 /*
100554 * Allocate and initialize a new local port bind bucket.
100555 * The bindhash mutex for snum's hash chain must be held here.
100556@@ -491,6 +494,8 @@ ok:
100557 }
100558 spin_unlock(&head->lock);
100559
100560+ gr_update_task_in_ip_table(current, inet_sk(sk));
100561+
100562 if (tw) {
100563 inet_twsk_deschedule(tw, death_row);
100564 inet_twsk_put(tw);
100565diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
100566index 13b229f..6956484 100644
100567--- a/net/ipv4/inetpeer.c
100568+++ b/net/ipv4/inetpeer.c
100569@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100570 struct inet_peer *p, *n;
100571 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
100572
100573+ pax_track_stack();
100574+
100575 /* Look up for the address quickly. */
100576 read_lock_bh(&peer_pool_lock);
100577 p = lookup(daddr, NULL);
100578@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100579 return NULL;
100580 n->v4daddr = daddr;
100581 atomic_set(&n->refcnt, 1);
100582- atomic_set(&n->rid, 0);
100583+ atomic_set_unchecked(&n->rid, 0);
100584 n->ip_id_count = secure_ip_id(daddr);
100585 n->tcp_ts_stamp = 0;
100586
100587diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
100588index d3fe10b..feeafc9 100644
100589--- a/net/ipv4/ip_fragment.c
100590+++ b/net/ipv4/ip_fragment.c
100591@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
100592 return 0;
100593
100594 start = qp->rid;
100595- end = atomic_inc_return(&peer->rid);
100596+ end = atomic_inc_return_unchecked(&peer->rid);
100597 qp->rid = end;
100598
100599 rc = qp->q.fragments && (end - start) > max;
100600diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
100601index e982b5c..f079d75 100644
100602--- a/net/ipv4/ip_sockglue.c
100603+++ b/net/ipv4/ip_sockglue.c
100604@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100605 int val;
100606 int len;
100607
100608+ pax_track_stack();
100609+
100610 if (level != SOL_IP)
100611 return -EOPNOTSUPP;
100612
100613@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100614 if (sk->sk_type != SOCK_STREAM)
100615 return -ENOPROTOOPT;
100616
100617- msg.msg_control = optval;
100618+ msg.msg_control = (void __force_kernel *)optval;
100619 msg.msg_controllen = len;
100620 msg.msg_flags = 0;
100621
100622diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
100623index f8d04c2..c1188f2 100644
100624--- a/net/ipv4/ipconfig.c
100625+++ b/net/ipv4/ipconfig.c
100626@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
100627
100628 mm_segment_t oldfs = get_fs();
100629 set_fs(get_ds());
100630- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100631+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100632 set_fs(oldfs);
100633 return res;
100634 }
100635@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
100636
100637 mm_segment_t oldfs = get_fs();
100638 set_fs(get_ds());
100639- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100640+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100641 set_fs(oldfs);
100642 return res;
100643 }
100644@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
100645
100646 mm_segment_t oldfs = get_fs();
100647 set_fs(get_ds());
100648- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
100649+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
100650 set_fs(oldfs);
100651 return res;
100652 }
100653diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
100654index c8b0cc3..05e4007 100644
100655--- a/net/ipv4/netfilter/arp_tables.c
100656+++ b/net/ipv4/netfilter/arp_tables.c
100657@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100658 private = &tmp;
100659 }
100660 #endif
100661+ memset(&info, 0, sizeof(info));
100662 info.valid_hooks = t->valid_hooks;
100663 memcpy(info.hook_entry, private->hook_entry,
100664 sizeof(info.hook_entry));
100665@@ -1003,6 +1004,11 @@ static int __do_replace(struct net *net, const char *name,
100666 unsigned int valid_hooks,
100667 struct xt_table_info *newinfo,
100668 unsigned int num_counters,
100669+ void __user *counters_ptr) __size_overflow(5);
100670+static int __do_replace(struct net *net, const char *name,
100671+ unsigned int valid_hooks,
100672+ struct xt_table_info *newinfo,
100673+ unsigned int num_counters,
100674 void __user *counters_ptr)
100675 {
100676 int ret;
100677@@ -1135,6 +1141,8 @@ add_counter_to_entry(struct arpt_entry *e,
100678 }
100679
100680 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100681+ int compat) __size_overflow(3);
100682+static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100683 int compat)
100684 {
100685 unsigned int i, curcpu;
100686diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
100687index c156db2..e772975 100644
100688--- a/net/ipv4/netfilter/ip_queue.c
100689+++ b/net/ipv4/netfilter/ip_queue.c
100690@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
100691
100692 if (v->data_len < sizeof(*user_iph))
100693 return 0;
100694+ if (v->data_len > 65535)
100695+ return -EMSGSIZE;
100696+
100697 diff = v->data_len - e->skb->len;
100698 if (diff < 0) {
100699 if (pskb_trim(e->skb, v->data_len))
100700@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
100701 static inline void
100702 __ipq_rcv_skb(struct sk_buff *skb)
100703 {
100704- int status, type, pid, flags, nlmsglen, skblen;
100705+ int status, type, pid, flags;
100706+ unsigned int nlmsglen, skblen;
100707 struct nlmsghdr *nlh;
100708
100709 skblen = skb->len;
100710diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
100711index 0606db1..918b88a 100644
100712--- a/net/ipv4/netfilter/ip_tables.c
100713+++ b/net/ipv4/netfilter/ip_tables.c
100714@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100715 private = &tmp;
100716 }
100717 #endif
100718+ memset(&info, 0, sizeof(info));
100719 info.valid_hooks = t->valid_hooks;
100720 memcpy(info.hook_entry, private->hook_entry,
100721 sizeof(info.hook_entry));
100722@@ -1208,6 +1209,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
100723 static int
100724 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100725 struct xt_table_info *newinfo, unsigned int num_counters,
100726+ void __user *counters_ptr) __size_overflow(5);
100727+static int
100728+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100729+ struct xt_table_info *newinfo, unsigned int num_counters,
100730 void __user *counters_ptr)
100731 {
100732 int ret;
100733@@ -1339,6 +1344,8 @@ add_counter_to_entry(struct ipt_entry *e,
100734 }
100735
100736 static int
100737+do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) __size_overflow(3);
100738+static int
100739 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
100740 {
100741 unsigned int i, curcpu;
100742diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100743index d9521f6..127fa44 100644
100744--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
100745+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100746@@ -436,6 +436,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
100747 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100748 unsigned char *eoc,
100749 unsigned long **oid,
100750+ unsigned int *len) __size_overflow(2);
100751+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100752+ unsigned char *eoc,
100753+ unsigned long **oid,
100754 unsigned int *len)
100755 {
100756 unsigned long subid;
100757diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
100758index ab996f9..3da5f96 100644
100759--- a/net/ipv4/raw.c
100760+++ b/net/ipv4/raw.c
100761@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100762 /* Charge it to the socket. */
100763
100764 if (sock_queue_rcv_skb(sk, skb) < 0) {
100765- atomic_inc(&sk->sk_drops);
100766+ atomic_inc_unchecked(&sk->sk_drops);
100767 kfree_skb(skb);
100768 return NET_RX_DROP;
100769 }
100770@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100771 int raw_rcv(struct sock *sk, struct sk_buff *skb)
100772 {
100773 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
100774- atomic_inc(&sk->sk_drops);
100775+ atomic_inc_unchecked(&sk->sk_drops);
100776 kfree_skb(skb);
100777 return NET_RX_DROP;
100778 }
100779@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
100780
100781 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
100782 {
100783+ struct icmp_filter filter;
100784+
100785+ if (optlen < 0)
100786+ return -EINVAL;
100787 if (optlen > sizeof(struct icmp_filter))
100788 optlen = sizeof(struct icmp_filter);
100789- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
100790+ if (copy_from_user(&filter, optval, optlen))
100791 return -EFAULT;
100792+ raw_sk(sk)->filter = filter;
100793+
100794 return 0;
100795 }
100796
100797 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
100798 {
100799 int len, ret = -EFAULT;
100800+ struct icmp_filter filter;
100801
100802 if (get_user(len, optlen))
100803 goto out;
100804@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
100805 if (len > sizeof(struct icmp_filter))
100806 len = sizeof(struct icmp_filter);
100807 ret = -EFAULT;
100808- if (put_user(len, optlen) ||
100809- copy_to_user(optval, &raw_sk(sk)->filter, len))
100810+ filter = raw_sk(sk)->filter;
100811+ if (put_user(len, optlen) || len > sizeof filter ||
100812+ copy_to_user(optval, &filter, len))
100813 goto out;
100814 ret = 0;
100815 out: return ret;
100816@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
100817 sk_wmem_alloc_get(sp),
100818 sk_rmem_alloc_get(sp),
100819 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100820- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
100821+ atomic_read(&sp->sk_refcnt),
100822+#ifdef CONFIG_GRKERNSEC_HIDESYM
100823+ NULL,
100824+#else
100825+ sp,
100826+#endif
100827+ atomic_read_unchecked(&sp->sk_drops));
100828 }
100829
100830 static int raw_seq_show(struct seq_file *seq, void *v)
100831diff --git a/net/ipv4/route.c b/net/ipv4/route.c
100832index 58f141b..b759702 100644
100833--- a/net/ipv4/route.c
100834+++ b/net/ipv4/route.c
100835@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
100836
100837 static inline int rt_genid(struct net *net)
100838 {
100839- return atomic_read(&net->ipv4.rt_genid);
100840+ return atomic_read_unchecked(&net->ipv4.rt_genid);
100841 }
100842
100843 #ifdef CONFIG_PROC_FS
100844@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
100845 unsigned char shuffle;
100846
100847 get_random_bytes(&shuffle, sizeof(shuffle));
100848- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
100849+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
100850 }
100851
100852 /*
100853@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
100854
100855 static __net_init int rt_secret_timer_init(struct net *net)
100856 {
100857- atomic_set(&net->ipv4.rt_genid,
100858+ atomic_set_unchecked(&net->ipv4.rt_genid,
100859 (int) ((num_physpages ^ (num_physpages>>8)) ^
100860 (jiffies ^ (jiffies >> 7))));
100861
100862diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
100863index f095659..adc892a 100644
100864--- a/net/ipv4/tcp.c
100865+++ b/net/ipv4/tcp.c
100866@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
100867 int val;
100868 int err = 0;
100869
100870+ pax_track_stack();
100871+
100872 /* This is a string value all the others are int's */
100873 if (optname == TCP_CONGESTION) {
100874 char name[TCP_CA_NAME_MAX];
100875@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
100876 struct tcp_sock *tp = tcp_sk(sk);
100877 int val, len;
100878
100879+ pax_track_stack();
100880+
100881 if (get_user(len, optlen))
100882 return -EFAULT;
100883
100884diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
100885index 6fc7961..33bad4a 100644
100886--- a/net/ipv4/tcp_ipv4.c
100887+++ b/net/ipv4/tcp_ipv4.c
100888@@ -85,6 +85,9 @@
100889 int sysctl_tcp_tw_reuse __read_mostly;
100890 int sysctl_tcp_low_latency __read_mostly;
100891
100892+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100893+extern int grsec_enable_blackhole;
100894+#endif
100895
100896 #ifdef CONFIG_TCP_MD5SIG
100897 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
100898@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
100899 return 0;
100900
100901 reset:
100902+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100903+ if (!grsec_enable_blackhole)
100904+#endif
100905 tcp_v4_send_reset(rsk, skb);
100906 discard:
100907 kfree_skb(skb);
100908@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
100909 TCP_SKB_CB(skb)->sacked = 0;
100910
100911 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100912- if (!sk)
100913+ if (!sk) {
100914+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100915+ ret = 1;
100916+#endif
100917 goto no_tcp_socket;
100918+ }
100919
100920 process:
100921- if (sk->sk_state == TCP_TIME_WAIT)
100922+ if (sk->sk_state == TCP_TIME_WAIT) {
100923+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100924+ ret = 2;
100925+#endif
100926 goto do_time_wait;
100927+ }
100928
100929 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
100930 goto discard_and_relse;
100931@@ -1651,6 +1665,10 @@ no_tcp_socket:
100932 bad_packet:
100933 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100934 } else {
100935+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100936+ if (!grsec_enable_blackhole || (ret == 1 &&
100937+ (skb->dev->flags & IFF_LOOPBACK)))
100938+#endif
100939 tcp_v4_send_reset(NULL, skb);
100940 }
100941
100942@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
100943 0, /* non standard timer */
100944 0, /* open_requests have no inode */
100945 atomic_read(&sk->sk_refcnt),
100946+#ifdef CONFIG_GRKERNSEC_HIDESYM
100947+ NULL,
100948+#else
100949 req,
100950+#endif
100951 len);
100952 }
100953
100954@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
100955 sock_i_uid(sk),
100956 icsk->icsk_probes_out,
100957 sock_i_ino(sk),
100958- atomic_read(&sk->sk_refcnt), sk,
100959+ atomic_read(&sk->sk_refcnt),
100960+#ifdef CONFIG_GRKERNSEC_HIDESYM
100961+ NULL,
100962+#else
100963+ sk,
100964+#endif
100965 jiffies_to_clock_t(icsk->icsk_rto),
100966 jiffies_to_clock_t(icsk->icsk_ack.ato),
100967 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
100968@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
100969 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
100970 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
100971 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
100972- atomic_read(&tw->tw_refcnt), tw, len);
100973+ atomic_read(&tw->tw_refcnt),
100974+#ifdef CONFIG_GRKERNSEC_HIDESYM
100975+ NULL,
100976+#else
100977+ tw,
100978+#endif
100979+ len);
100980 }
100981
100982 #define TMPSZ 150
100983diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
100984index 4c03598..e09a8e8 100644
100985--- a/net/ipv4/tcp_minisocks.c
100986+++ b/net/ipv4/tcp_minisocks.c
100987@@ -26,6 +26,10 @@
100988 #include <net/inet_common.h>
100989 #include <net/xfrm.h>
100990
100991+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100992+extern int grsec_enable_blackhole;
100993+#endif
100994+
100995 #ifdef CONFIG_SYSCTL
100996 #define SYNC_INIT 0 /* let the user enable it */
100997 #else
100998@@ -672,6 +676,10 @@ listen_overflow:
100999
101000 embryonic_reset:
101001 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
101002+
101003+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101004+ if (!grsec_enable_blackhole)
101005+#endif
101006 if (!(flg & TCP_FLAG_RST))
101007 req->rsk_ops->send_reset(sk, skb);
101008
101009diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
101010index af83bdf..ec91cb2 100644
101011--- a/net/ipv4/tcp_output.c
101012+++ b/net/ipv4/tcp_output.c
101013@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
101014 __u8 *md5_hash_location;
101015 int mss;
101016
101017+ pax_track_stack();
101018+
101019 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
101020 if (skb == NULL)
101021 return NULL;
101022diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
101023index 59f5b5e..193860f 100644
101024--- a/net/ipv4/tcp_probe.c
101025+++ b/net/ipv4/tcp_probe.c
101026@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
101027 if (cnt + width >= len)
101028 break;
101029
101030- if (copy_to_user(buf + cnt, tbuf, width))
101031+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
101032 return -EFAULT;
101033 cnt += width;
101034 }
101035diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
101036index 57d5501..a9ed13a 100644
101037--- a/net/ipv4/tcp_timer.c
101038+++ b/net/ipv4/tcp_timer.c
101039@@ -21,6 +21,10 @@
101040 #include <linux/module.h>
101041 #include <net/tcp.h>
101042
101043+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101044+extern int grsec_lastack_retries;
101045+#endif
101046+
101047 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
101048 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
101049 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
101050@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
101051 }
101052 }
101053
101054+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101055+ if ((sk->sk_state == TCP_LAST_ACK) &&
101056+ (grsec_lastack_retries > 0) &&
101057+ (grsec_lastack_retries < retry_until))
101058+ retry_until = grsec_lastack_retries;
101059+#endif
101060+
101061 if (retransmits_timed_out(sk, retry_until)) {
101062 /* Has it gone just too far? */
101063 tcp_write_err(sk);
101064diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
101065index 8e28770..72105c8 100644
101066--- a/net/ipv4/udp.c
101067+++ b/net/ipv4/udp.c
101068@@ -86,6 +86,7 @@
101069 #include <linux/types.h>
101070 #include <linux/fcntl.h>
101071 #include <linux/module.h>
101072+#include <linux/security.h>
101073 #include <linux/socket.h>
101074 #include <linux/sockios.h>
101075 #include <linux/igmp.h>
101076@@ -106,6 +107,10 @@
101077 #include <net/xfrm.h>
101078 #include "udp_impl.h"
101079
101080+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101081+extern int grsec_enable_blackhole;
101082+#endif
101083+
101084 struct udp_table udp_table;
101085 EXPORT_SYMBOL(udp_table);
101086
101087@@ -371,6 +376,9 @@ found:
101088 return s;
101089 }
101090
101091+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
101092+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
101093+
101094 /*
101095 * This routine is called by the ICMP module when it gets some
101096 * sort of error condition. If err < 0 then the socket should
101097@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
101098 dport = usin->sin_port;
101099 if (dport == 0)
101100 return -EINVAL;
101101+
101102+ err = gr_search_udp_sendmsg(sk, usin);
101103+ if (err)
101104+ return err;
101105 } else {
101106 if (sk->sk_state != TCP_ESTABLISHED)
101107 return -EDESTADDRREQ;
101108+
101109+ err = gr_search_udp_sendmsg(sk, NULL);
101110+ if (err)
101111+ return err;
101112+
101113 daddr = inet->daddr;
101114 dport = inet->dport;
101115 /* Open fast path for connected socket.
101116@@ -945,6 +962,10 @@ try_again:
101117 if (!skb)
101118 goto out;
101119
101120+ err = gr_search_udp_recvmsg(sk, skb);
101121+ if (err)
101122+ goto out_free;
101123+
101124 ulen = skb->len - sizeof(struct udphdr);
101125 copied = len;
101126 if (copied > ulen)
101127@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101128 if (rc == -ENOMEM) {
101129 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
101130 is_udplite);
101131- atomic_inc(&sk->sk_drops);
101132+ atomic_inc_unchecked(&sk->sk_drops);
101133 }
101134 goto drop;
101135 }
101136@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101137 goto csum_error;
101138
101139 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
101140+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101141+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101142+#endif
101143 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
101144
101145 /*
101146@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
101147 sk_wmem_alloc_get(sp),
101148 sk_rmem_alloc_get(sp),
101149 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
101150- atomic_read(&sp->sk_refcnt), sp,
101151- atomic_read(&sp->sk_drops), len);
101152+ atomic_read(&sp->sk_refcnt),
101153+#ifdef CONFIG_GRKERNSEC_HIDESYM
101154+ NULL,
101155+#else
101156+ sp,
101157+#endif
101158+ atomic_read_unchecked(&sp->sk_drops), len);
101159 }
101160
101161 int udp4_seq_show(struct seq_file *seq, void *v)
101162diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
101163index 8ac3d09..fc58c5f 100644
101164--- a/net/ipv6/addrconf.c
101165+++ b/net/ipv6/addrconf.c
101166@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
101167 p.iph.ihl = 5;
101168 p.iph.protocol = IPPROTO_IPV6;
101169 p.iph.ttl = 64;
101170- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
101171+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
101172
101173 if (ops->ndo_do_ioctl) {
101174 mm_segment_t oldfs = get_fs();
101175diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
101176index cc4797d..7cfdfcc 100644
101177--- a/net/ipv6/inet6_connection_sock.c
101178+++ b/net/ipv6/inet6_connection_sock.c
101179@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
101180 #ifdef CONFIG_XFRM
101181 {
101182 struct rt6_info *rt = (struct rt6_info *)dst;
101183- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
101184+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
101185 }
101186 #endif
101187 }
101188@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
101189 #ifdef CONFIG_XFRM
101190 if (dst) {
101191 struct rt6_info *rt = (struct rt6_info *)dst;
101192- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
101193+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
101194 sk->sk_dst_cache = NULL;
101195 dst_release(dst);
101196 dst = NULL;
101197diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
101198index 093e9b2..f72cddb 100644
101199--- a/net/ipv6/inet6_hashtables.c
101200+++ b/net/ipv6/inet6_hashtables.c
101201@@ -119,7 +119,7 @@ out:
101202 }
101203 EXPORT_SYMBOL(__inet6_lookup_established);
101204
101205-static int inline compute_score(struct sock *sk, struct net *net,
101206+static inline int compute_score(struct sock *sk, struct net *net,
101207 const unsigned short hnum,
101208 const struct in6_addr *daddr,
101209 const int dif)
101210diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
101211index 4f7aaf6..f7acf45 100644
101212--- a/net/ipv6/ipv6_sockglue.c
101213+++ b/net/ipv6/ipv6_sockglue.c
101214@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
101215 int val, valbool;
101216 int retv = -ENOPROTOOPT;
101217
101218+ pax_track_stack();
101219+
101220 if (optval == NULL)
101221 val=0;
101222 else {
101223@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101224 int len;
101225 int val;
101226
101227+ pax_track_stack();
101228+
101229 if (ip6_mroute_opt(optname))
101230 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
101231
101232@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101233 if (sk->sk_type != SOCK_STREAM)
101234 return -ENOPROTOOPT;
101235
101236- msg.msg_control = optval;
101237+ msg.msg_control = (void __force_kernel *)optval;
101238 msg.msg_controllen = len;
101239 msg.msg_flags = 0;
101240
101241diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
101242index 1cf3f0c..1d4376f 100644
101243--- a/net/ipv6/netfilter/ip6_queue.c
101244+++ b/net/ipv6/netfilter/ip6_queue.c
101245@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
101246
101247 if (v->data_len < sizeof(*user_iph))
101248 return 0;
101249+ if (v->data_len > 65535)
101250+ return -EMSGSIZE;
101251+
101252 diff = v->data_len - e->skb->len;
101253 if (diff < 0) {
101254 if (pskb_trim(e->skb, v->data_len))
101255@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
101256 static inline void
101257 __ipq_rcv_skb(struct sk_buff *skb)
101258 {
101259- int status, type, pid, flags, nlmsglen, skblen;
101260+ int status, type, pid, flags;
101261+ unsigned int nlmsglen, skblen;
101262 struct nlmsghdr *nlh;
101263
101264 skblen = skb->len;
101265diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
101266index 78b5a36..2b9bb06 100644
101267--- a/net/ipv6/netfilter/ip6_tables.c
101268+++ b/net/ipv6/netfilter/ip6_tables.c
101269@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
101270 private = &tmp;
101271 }
101272 #endif
101273+ memset(&info, 0, sizeof(info));
101274 info.valid_hooks = t->valid_hooks;
101275 memcpy(info.hook_entry, private->hook_entry,
101276 sizeof(info.hook_entry));
101277@@ -1240,6 +1241,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
101278 static int
101279 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101280 struct xt_table_info *newinfo, unsigned int num_counters,
101281+ void __user *counters_ptr) __size_overflow(5);
101282+static int
101283+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101284+ struct xt_table_info *newinfo, unsigned int num_counters,
101285 void __user *counters_ptr)
101286 {
101287 int ret;
101288@@ -1373,6 +1378,9 @@ add_counter_to_entry(struct ip6t_entry *e,
101289
101290 static int
101291 do_add_counters(struct net *net, void __user *user, unsigned int len,
101292+ int compat) __size_overflow(3);
101293+static int
101294+do_add_counters(struct net *net, void __user *user, unsigned int len,
101295 int compat)
101296 {
101297 unsigned int i, curcpu;
101298diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
101299index 4f24570..b813b34 100644
101300--- a/net/ipv6/raw.c
101301+++ b/net/ipv6/raw.c
101302@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
101303 {
101304 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
101305 skb_checksum_complete(skb)) {
101306- atomic_inc(&sk->sk_drops);
101307+ atomic_inc_unchecked(&sk->sk_drops);
101308 kfree_skb(skb);
101309 return NET_RX_DROP;
101310 }
101311
101312 /* Charge it to the socket. */
101313 if (sock_queue_rcv_skb(sk,skb)<0) {
101314- atomic_inc(&sk->sk_drops);
101315+ atomic_inc_unchecked(&sk->sk_drops);
101316 kfree_skb(skb);
101317 return NET_RX_DROP;
101318 }
101319@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101320 struct raw6_sock *rp = raw6_sk(sk);
101321
101322 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
101323- atomic_inc(&sk->sk_drops);
101324+ atomic_inc_unchecked(&sk->sk_drops);
101325 kfree_skb(skb);
101326 return NET_RX_DROP;
101327 }
101328@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101329
101330 if (inet->hdrincl) {
101331 if (skb_checksum_complete(skb)) {
101332- atomic_inc(&sk->sk_drops);
101333+ atomic_inc_unchecked(&sk->sk_drops);
101334 kfree_skb(skb);
101335 return NET_RX_DROP;
101336 }
101337@@ -518,7 +518,7 @@ csum_copy_err:
101338 as some normal condition.
101339 */
101340 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
101341- atomic_inc(&sk->sk_drops);
101342+ atomic_inc_unchecked(&sk->sk_drops);
101343 goto out;
101344 }
101345
101346@@ -600,7 +600,7 @@ out:
101347 return err;
101348 }
101349
101350-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
101351+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
101352 struct flowi *fl, struct rt6_info *rt,
101353 unsigned int flags)
101354 {
101355@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
101356 u16 proto;
101357 int err;
101358
101359+ pax_track_stack();
101360+
101361 /* Rough check on arithmetic overflow,
101362 better check is made in ip6_append_data().
101363 */
101364@@ -916,12 +918,17 @@ do_confirm:
101365 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
101366 char __user *optval, int optlen)
101367 {
101368+ struct icmp6_filter filter;
101369+
101370 switch (optname) {
101371 case ICMPV6_FILTER:
101372+ if (optlen < 0)
101373+ return -EINVAL;
101374 if (optlen > sizeof(struct icmp6_filter))
101375 optlen = sizeof(struct icmp6_filter);
101376- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
101377+ if (copy_from_user(&filter, optval, optlen))
101378 return -EFAULT;
101379+ raw6_sk(sk)->filter = filter;
101380 return 0;
101381 default:
101382 return -ENOPROTOOPT;
101383@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101384 char __user *optval, int __user *optlen)
101385 {
101386 int len;
101387+ struct icmp6_filter filter;
101388
101389 switch (optname) {
101390 case ICMPV6_FILTER:
101391@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101392 len = sizeof(struct icmp6_filter);
101393 if (put_user(len, optlen))
101394 return -EFAULT;
101395- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
101396+ filter = raw6_sk(sk)->filter;
101397+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
101398 return -EFAULT;
101399 return 0;
101400 default:
101401@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
101402 0, 0L, 0,
101403 sock_i_uid(sp), 0,
101404 sock_i_ino(sp),
101405- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
101406+ atomic_read(&sp->sk_refcnt),
101407+#ifdef CONFIG_GRKERNSEC_HIDESYM
101408+ NULL,
101409+#else
101410+ sp,
101411+#endif
101412+ atomic_read_unchecked(&sp->sk_drops));
101413 }
101414
101415 static int raw6_seq_show(struct seq_file *seq, void *v)
101416diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
101417index faae6df..d4430c1 100644
101418--- a/net/ipv6/tcp_ipv6.c
101419+++ b/net/ipv6/tcp_ipv6.c
101420@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
101421 }
101422 #endif
101423
101424+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101425+extern int grsec_enable_blackhole;
101426+#endif
101427+
101428 static void tcp_v6_hash(struct sock *sk)
101429 {
101430 if (sk->sk_state != TCP_CLOSE) {
101431@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
101432 return 0;
101433
101434 reset:
101435+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101436+ if (!grsec_enable_blackhole)
101437+#endif
101438 tcp_v6_send_reset(sk, skb);
101439 discard:
101440 if (opt_skb)
101441@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
101442 TCP_SKB_CB(skb)->sacked = 0;
101443
101444 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
101445- if (!sk)
101446+ if (!sk) {
101447+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101448+ ret = 1;
101449+#endif
101450 goto no_tcp_socket;
101451+ }
101452
101453 process:
101454- if (sk->sk_state == TCP_TIME_WAIT)
101455+ if (sk->sk_state == TCP_TIME_WAIT) {
101456+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101457+ ret = 2;
101458+#endif
101459 goto do_time_wait;
101460+ }
101461
101462 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
101463 goto discard_and_relse;
101464@@ -1701,6 +1716,10 @@ no_tcp_socket:
101465 bad_packet:
101466 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
101467 } else {
101468+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101469+ if (!grsec_enable_blackhole || (ret == 1 &&
101470+ (skb->dev->flags & IFF_LOOPBACK)))
101471+#endif
101472 tcp_v6_send_reset(NULL, skb);
101473 }
101474
101475@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
101476 uid,
101477 0, /* non standard timer */
101478 0, /* open_requests have no inode */
101479- 0, req);
101480+ 0,
101481+#ifdef CONFIG_GRKERNSEC_HIDESYM
101482+ NULL
101483+#else
101484+ req
101485+#endif
101486+ );
101487 }
101488
101489 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101490@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101491 sock_i_uid(sp),
101492 icsk->icsk_probes_out,
101493 sock_i_ino(sp),
101494- atomic_read(&sp->sk_refcnt), sp,
101495+ atomic_read(&sp->sk_refcnt),
101496+#ifdef CONFIG_GRKERNSEC_HIDESYM
101497+ NULL,
101498+#else
101499+ sp,
101500+#endif
101501 jiffies_to_clock_t(icsk->icsk_rto),
101502 jiffies_to_clock_t(icsk->icsk_ack.ato),
101503 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
101504@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
101505 dest->s6_addr32[2], dest->s6_addr32[3], destp,
101506 tw->tw_substate, 0, 0,
101507 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
101508- atomic_read(&tw->tw_refcnt), tw);
101509+ atomic_read(&tw->tw_refcnt),
101510+#ifdef CONFIG_GRKERNSEC_HIDESYM
101511+ NULL
101512+#else
101513+ tw
101514+#endif
101515+ );
101516 }
101517
101518 static int tcp6_seq_show(struct seq_file *seq, void *v)
101519diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
101520index 9cc6289..052c521 100644
101521--- a/net/ipv6/udp.c
101522+++ b/net/ipv6/udp.c
101523@@ -49,6 +49,10 @@
101524 #include <linux/seq_file.h>
101525 #include "udp_impl.h"
101526
101527+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101528+extern int grsec_enable_blackhole;
101529+#endif
101530+
101531 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
101532 {
101533 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
101534@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
101535 if (rc == -ENOMEM) {
101536 UDP6_INC_STATS_BH(sock_net(sk),
101537 UDP_MIB_RCVBUFERRORS, is_udplite);
101538- atomic_inc(&sk->sk_drops);
101539+ atomic_inc_unchecked(&sk->sk_drops);
101540 }
101541 goto drop;
101542 }
101543@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101544 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
101545 proto == IPPROTO_UDPLITE);
101546
101547+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101548+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101549+#endif
101550 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
101551
101552 kfree_skb(skb);
101553@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
101554 0, 0L, 0,
101555 sock_i_uid(sp), 0,
101556 sock_i_ino(sp),
101557- atomic_read(&sp->sk_refcnt), sp,
101558- atomic_read(&sp->sk_drops));
101559+ atomic_read(&sp->sk_refcnt),
101560+#ifdef CONFIG_GRKERNSEC_HIDESYM
101561+ NULL,
101562+#else
101563+ sp,
101564+#endif
101565+ atomic_read_unchecked(&sp->sk_drops));
101566 }
101567
101568 int udp6_seq_show(struct seq_file *seq, void *v)
101569diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
101570index 48bb1e3..5980e6e 100644
101571--- a/net/ipv6/xfrm6_tunnel.c
101572+++ b/net/ipv6/xfrm6_tunnel.c
101573@@ -258,7 +258,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
101574 __be32 spi;
101575
101576 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
101577- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
101578+ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
101579 }
101580
101581 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
101582diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
101583index 811984d..11f59b7 100644
101584--- a/net/irda/ircomm/ircomm_tty.c
101585+++ b/net/irda/ircomm/ircomm_tty.c
101586@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101587 add_wait_queue(&self->open_wait, &wait);
101588
101589 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
101590- __FILE__,__LINE__, tty->driver->name, self->open_count );
101591+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101592
101593 /* As far as I can see, we protect open_count - Jean II */
101594 spin_lock_irqsave(&self->spinlock, flags);
101595 if (!tty_hung_up_p(filp)) {
101596 extra_count = 1;
101597- self->open_count--;
101598+ local_dec(&self->open_count);
101599 }
101600 spin_unlock_irqrestore(&self->spinlock, flags);
101601- self->blocked_open++;
101602+ local_inc(&self->blocked_open);
101603
101604 while (1) {
101605 if (tty->termios->c_cflag & CBAUD) {
101606@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101607 }
101608
101609 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
101610- __FILE__,__LINE__, tty->driver->name, self->open_count );
101611+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101612
101613 schedule();
101614 }
101615@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101616 if (extra_count) {
101617 /* ++ is not atomic, so this should be protected - Jean II */
101618 spin_lock_irqsave(&self->spinlock, flags);
101619- self->open_count++;
101620+ local_inc(&self->open_count);
101621 spin_unlock_irqrestore(&self->spinlock, flags);
101622 }
101623- self->blocked_open--;
101624+ local_dec(&self->blocked_open);
101625
101626 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
101627- __FILE__,__LINE__, tty->driver->name, self->open_count);
101628+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
101629
101630 if (!retval)
101631 self->flags |= ASYNC_NORMAL_ACTIVE;
101632@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
101633 }
101634 /* ++ is not atomic, so this should be protected - Jean II */
101635 spin_lock_irqsave(&self->spinlock, flags);
101636- self->open_count++;
101637+ local_inc(&self->open_count);
101638
101639 tty->driver_data = self;
101640 self->tty = tty;
101641 spin_unlock_irqrestore(&self->spinlock, flags);
101642
101643 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
101644- self->line, self->open_count);
101645+ self->line, local_read(&self->open_count));
101646
101647 /* Not really used by us, but lets do it anyway */
101648 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
101649@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101650 return;
101651 }
101652
101653- if ((tty->count == 1) && (self->open_count != 1)) {
101654+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
101655 /*
101656 * Uh, oh. tty->count is 1, which means that the tty
101657 * structure will be freed. state->count should always
101658@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101659 */
101660 IRDA_DEBUG(0, "%s(), bad serial port count; "
101661 "tty->count is 1, state->count is %d\n", __func__ ,
101662- self->open_count);
101663- self->open_count = 1;
101664+ local_read(&self->open_count));
101665+ local_set(&self->open_count, 1);
101666 }
101667
101668- if (--self->open_count < 0) {
101669+ if (local_dec_return(&self->open_count) < 0) {
101670 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
101671- __func__, self->line, self->open_count);
101672- self->open_count = 0;
101673+ __func__, self->line, local_read(&self->open_count));
101674+ local_set(&self->open_count, 0);
101675 }
101676- if (self->open_count) {
101677+ if (local_read(&self->open_count)) {
101678 spin_unlock_irqrestore(&self->spinlock, flags);
101679
101680 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
101681@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101682 tty->closing = 0;
101683 self->tty = NULL;
101684
101685- if (self->blocked_open) {
101686+ if (local_read(&self->blocked_open)) {
101687 if (self->close_delay)
101688 schedule_timeout_interruptible(self->close_delay);
101689 wake_up_interruptible(&self->open_wait);
101690@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
101691 spin_lock_irqsave(&self->spinlock, flags);
101692 self->flags &= ~ASYNC_NORMAL_ACTIVE;
101693 self->tty = NULL;
101694- self->open_count = 0;
101695+ local_set(&self->open_count, 0);
101696 spin_unlock_irqrestore(&self->spinlock, flags);
101697
101698 wake_up_interruptible(&self->open_wait);
101699@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
101700 seq_putc(m, '\n');
101701
101702 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
101703- seq_printf(m, "Open count: %d\n", self->open_count);
101704+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
101705 seq_printf(m, "Max data size: %d\n", self->max_data_size);
101706 seq_printf(m, "Max header size: %d\n", self->max_header_size);
101707
101708diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
101709index bada1b9..f325943 100644
101710--- a/net/iucv/af_iucv.c
101711+++ b/net/iucv/af_iucv.c
101712@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
101713
101714 write_lock_bh(&iucv_sk_list.lock);
101715
101716- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
101717+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101718 while (__iucv_get_sock_by_name(name)) {
101719 sprintf(name, "%08x",
101720- atomic_inc_return(&iucv_sk_list.autobind_name));
101721+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101722 }
101723
101724 write_unlock_bh(&iucv_sk_list.lock);
101725diff --git a/net/key/af_key.c b/net/key/af_key.c
101726index 4e98193..439b449 100644
101727--- a/net/key/af_key.c
101728+++ b/net/key/af_key.c
101729@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
101730 struct xfrm_migrate m[XFRM_MAX_DEPTH];
101731 struct xfrm_kmaddress k;
101732
101733+ pax_track_stack();
101734+
101735 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
101736 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
101737 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
101738@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
101739 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
101740 else
101741 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
101742+#ifdef CONFIG_GRKERNSEC_HIDESYM
101743+ NULL,
101744+#else
101745 s,
101746+#endif
101747 atomic_read(&s->sk_refcnt),
101748 sk_rmem_alloc_get(s),
101749 sk_wmem_alloc_get(s),
101750diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
101751index bda96d1..c038b72 100644
101752--- a/net/lapb/lapb_iface.c
101753+++ b/net/lapb/lapb_iface.c
101754@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
101755 goto out;
101756
101757 lapb->dev = dev;
101758- lapb->callbacks = *callbacks;
101759+ lapb->callbacks = callbacks;
101760
101761 __lapb_insert_cb(lapb);
101762
101763@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
101764
101765 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
101766 {
101767- if (lapb->callbacks.connect_confirmation)
101768- lapb->callbacks.connect_confirmation(lapb->dev, reason);
101769+ if (lapb->callbacks->connect_confirmation)
101770+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
101771 }
101772
101773 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
101774 {
101775- if (lapb->callbacks.connect_indication)
101776- lapb->callbacks.connect_indication(lapb->dev, reason);
101777+ if (lapb->callbacks->connect_indication)
101778+ lapb->callbacks->connect_indication(lapb->dev, reason);
101779 }
101780
101781 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
101782 {
101783- if (lapb->callbacks.disconnect_confirmation)
101784- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
101785+ if (lapb->callbacks->disconnect_confirmation)
101786+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
101787 }
101788
101789 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
101790 {
101791- if (lapb->callbacks.disconnect_indication)
101792- lapb->callbacks.disconnect_indication(lapb->dev, reason);
101793+ if (lapb->callbacks->disconnect_indication)
101794+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
101795 }
101796
101797 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
101798 {
101799- if (lapb->callbacks.data_indication)
101800- return lapb->callbacks.data_indication(lapb->dev, skb);
101801+ if (lapb->callbacks->data_indication)
101802+ return lapb->callbacks->data_indication(lapb->dev, skb);
101803
101804 kfree_skb(skb);
101805 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
101806@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
101807 {
101808 int used = 0;
101809
101810- if (lapb->callbacks.data_transmit) {
101811- lapb->callbacks.data_transmit(lapb->dev, skb);
101812+ if (lapb->callbacks->data_transmit) {
101813+ lapb->callbacks->data_transmit(lapb->dev, skb);
101814 used = 1;
101815 }
101816
101817diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
101818index fe2d3f8..e57f683 100644
101819--- a/net/mac80211/cfg.c
101820+++ b/net/mac80211/cfg.c
101821@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
101822 return err;
101823 }
101824
101825-struct cfg80211_ops mac80211_config_ops = {
101826+const struct cfg80211_ops mac80211_config_ops = {
101827 .add_virtual_intf = ieee80211_add_iface,
101828 .del_virtual_intf = ieee80211_del_iface,
101829 .change_virtual_intf = ieee80211_change_iface,
101830diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
101831index 7d7879f..2d51f62 100644
101832--- a/net/mac80211/cfg.h
101833+++ b/net/mac80211/cfg.h
101834@@ -4,6 +4,6 @@
101835 #ifndef __CFG_H
101836 #define __CFG_H
101837
101838-extern struct cfg80211_ops mac80211_config_ops;
101839+extern const struct cfg80211_ops mac80211_config_ops;
101840
101841 #endif /* __CFG_H */
101842diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
101843index 99c7525..9cb4937 100644
101844--- a/net/mac80211/debugfs_key.c
101845+++ b/net/mac80211/debugfs_key.c
101846@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
101847 size_t count, loff_t *ppos)
101848 {
101849 struct ieee80211_key *key = file->private_data;
101850- int i, res, bufsize = 2 * key->conf.keylen + 2;
101851+ int i, bufsize = 2 * key->conf.keylen + 2;
101852 char *buf = kmalloc(bufsize, GFP_KERNEL);
101853 char *p = buf;
101854+ ssize_t res;
101855+
101856+ if (buf == NULL)
101857+ return -ENOMEM;
101858
101859 for (i = 0; i < key->conf.keylen; i++)
101860 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
101861diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
101862index 33a2e89..08650c8 100644
101863--- a/net/mac80211/debugfs_sta.c
101864+++ b/net/mac80211/debugfs_sta.c
101865@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
101866 int i;
101867 struct sta_info *sta = file->private_data;
101868
101869+ pax_track_stack();
101870+
101871 spin_lock_bh(&sta->lock);
101872 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
101873 sta->ampdu_mlme.dialog_token_allocator + 1);
101874diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
101875index ca62bfe..6657a03 100644
101876--- a/net/mac80211/ieee80211_i.h
101877+++ b/net/mac80211/ieee80211_i.h
101878@@ -25,6 +25,7 @@
101879 #include <linux/etherdevice.h>
101880 #include <net/cfg80211.h>
101881 #include <net/mac80211.h>
101882+#include <asm/local.h>
101883 #include "key.h"
101884 #include "sta_info.h"
101885
101886@@ -635,7 +636,7 @@ struct ieee80211_local {
101887 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
101888 spinlock_t queue_stop_reason_lock;
101889
101890- int open_count;
101891+ local_t open_count;
101892 int monitors, cooked_mntrs;
101893 /* number of interfaces with corresponding FIF_ flags */
101894 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
101895diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
101896index 079c500..eb3c6d4 100644
101897--- a/net/mac80211/iface.c
101898+++ b/net/mac80211/iface.c
101899@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
101900 break;
101901 }
101902
101903- if (local->open_count == 0) {
101904+ if (local_read(&local->open_count) == 0) {
101905 res = drv_start(local);
101906 if (res)
101907 goto err_del_bss;
101908@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
101909 * Validate the MAC address for this device.
101910 */
101911 if (!is_valid_ether_addr(dev->dev_addr)) {
101912- if (!local->open_count)
101913+ if (!local_read(&local->open_count))
101914 drv_stop(local);
101915 return -EADDRNOTAVAIL;
101916 }
101917@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
101918
101919 hw_reconf_flags |= __ieee80211_recalc_idle(local);
101920
101921- local->open_count++;
101922+ local_inc(&local->open_count);
101923 if (hw_reconf_flags) {
101924 ieee80211_hw_config(local, hw_reconf_flags);
101925 /*
101926@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
101927 err_del_interface:
101928 drv_remove_interface(local, &conf);
101929 err_stop:
101930- if (!local->open_count)
101931+ if (!local_read(&local->open_count))
101932 drv_stop(local);
101933 err_del_bss:
101934 sdata->bss = NULL;
101935@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
101936 WARN_ON(!list_empty(&sdata->u.ap.vlans));
101937 }
101938
101939- local->open_count--;
101940+ local_dec(&local->open_count);
101941
101942 switch (sdata->vif.type) {
101943 case NL80211_IFTYPE_AP_VLAN:
101944@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
101945
101946 ieee80211_recalc_ps(local, -1);
101947
101948- if (local->open_count == 0) {
101949+ if (local_read(&local->open_count) == 0) {
101950 ieee80211_clear_tx_pending(local);
101951 ieee80211_stop_device(local);
101952
101953diff --git a/net/mac80211/main.c b/net/mac80211/main.c
101954index 2dfe176..74e4388 100644
101955--- a/net/mac80211/main.c
101956+++ b/net/mac80211/main.c
101957@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
101958 local->hw.conf.power_level = power;
101959 }
101960
101961- if (changed && local->open_count) {
101962+ if (changed && local_read(&local->open_count)) {
101963 ret = drv_config(local, changed);
101964 /*
101965 * Goal:
101966diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
101967index e67eea7..fcc227e 100644
101968--- a/net/mac80211/mlme.c
101969+++ b/net/mac80211/mlme.c
101970@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
101971 bool have_higher_than_11mbit = false, newsta = false;
101972 u16 ap_ht_cap_flags;
101973
101974+ pax_track_stack();
101975+
101976 /*
101977 * AssocResp and ReassocResp have identical structure, so process both
101978 * of them in this function.
101979diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
101980index e535f1c..4d733d1 100644
101981--- a/net/mac80211/pm.c
101982+++ b/net/mac80211/pm.c
101983@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
101984 }
101985
101986 /* stop hardware - this must stop RX */
101987- if (local->open_count)
101988+ if (local_read(&local->open_count))
101989 ieee80211_stop_device(local);
101990
101991 local->suspended = true;
101992diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
101993index b33efc4..0a2efb6 100644
101994--- a/net/mac80211/rate.c
101995+++ b/net/mac80211/rate.c
101996@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
101997 struct rate_control_ref *ref, *old;
101998
101999 ASSERT_RTNL();
102000- if (local->open_count)
102001+ if (local_read(&local->open_count))
102002 return -EBUSY;
102003
102004 ref = rate_control_alloc(name, local);
102005diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
102006index b1d7904..57e4da7 100644
102007--- a/net/mac80211/tx.c
102008+++ b/net/mac80211/tx.c
102009@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
102010 return cpu_to_le16(dur);
102011 }
102012
102013-static int inline is_ieee80211_device(struct ieee80211_local *local,
102014+static inline int is_ieee80211_device(struct ieee80211_local *local,
102015 struct net_device *dev)
102016 {
102017 return local == wdev_priv(dev->ieee80211_ptr);
102018diff --git a/net/mac80211/util.c b/net/mac80211/util.c
102019index 31b1085..48fb26d 100644
102020--- a/net/mac80211/util.c
102021+++ b/net/mac80211/util.c
102022@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
102023 local->resuming = true;
102024
102025 /* restart hardware */
102026- if (local->open_count) {
102027+ if (local_read(&local->open_count)) {
102028 /*
102029 * Upon resume hardware can sometimes be goofy due to
102030 * various platform / driver / bus issues, so restarting
102031diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
102032index 634d14a..b35a608 100644
102033--- a/net/netfilter/Kconfig
102034+++ b/net/netfilter/Kconfig
102035@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
102036
102037 To compile it as a module, choose M here. If unsure, say N.
102038
102039+config NETFILTER_XT_MATCH_GRADM
102040+ tristate '"gradm" match support'
102041+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
102042+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
102043+ ---help---
102044+ The gradm match allows to match on grsecurity RBAC being enabled.
102045+ It is useful when iptables rules are applied early on bootup to
102046+ prevent connections to the machine (except from a trusted host)
102047+ while the RBAC system is disabled.
102048+
102049 config NETFILTER_XT_MATCH_HASHLIMIT
102050 tristate '"hashlimit" match support'
102051 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
102052diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
102053index 49f62ee..a17b2c6 100644
102054--- a/net/netfilter/Makefile
102055+++ b/net/netfilter/Makefile
102056@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
102057 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
102058 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
102059 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
102060+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
102061 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
102062 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
102063 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
102064diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
102065index 3c7e427..724043c 100644
102066--- a/net/netfilter/ipvs/ip_vs_app.c
102067+++ b/net/netfilter/ipvs/ip_vs_app.c
102068@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
102069 .open = ip_vs_app_open,
102070 .read = seq_read,
102071 .llseek = seq_lseek,
102072- .release = seq_release,
102073+ .release = seq_release_net,
102074 };
102075 #endif
102076
102077diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
102078index 95682e5..457dbac 100644
102079--- a/net/netfilter/ipvs/ip_vs_conn.c
102080+++ b/net/netfilter/ipvs/ip_vs_conn.c
102081@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
102082 /* if the connection is not template and is created
102083 * by sync, preserve the activity flag.
102084 */
102085- cp->flags |= atomic_read(&dest->conn_flags) &
102086+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
102087 (~IP_VS_CONN_F_INACTIVE);
102088 else
102089- cp->flags |= atomic_read(&dest->conn_flags);
102090+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
102091 cp->dest = dest;
102092
102093 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
102094@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
102095 atomic_set(&cp->refcnt, 1);
102096
102097 atomic_set(&cp->n_control, 0);
102098- atomic_set(&cp->in_pkts, 0);
102099+ atomic_set_unchecked(&cp->in_pkts, 0);
102100
102101 atomic_inc(&ip_vs_conn_count);
102102 if (flags & IP_VS_CONN_F_NO_CPORT)
102103@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
102104 .open = ip_vs_conn_open,
102105 .read = seq_read,
102106 .llseek = seq_lseek,
102107- .release = seq_release,
102108+ .release = seq_release_net,
102109 };
102110
102111 static const char *ip_vs_origin_name(unsigned flags)
102112@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
102113 .open = ip_vs_conn_sync_open,
102114 .read = seq_read,
102115 .llseek = seq_lseek,
102116- .release = seq_release,
102117+ .release = seq_release_net,
102118 };
102119
102120 #endif
102121@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
102122
102123 /* Don't drop the entry if its number of incoming packets is not
102124 located in [0, 8] */
102125- i = atomic_read(&cp->in_pkts);
102126+ i = atomic_read_unchecked(&cp->in_pkts);
102127 if (i > 8 || i < 0) return 0;
102128
102129 if (!todrop_rate[i]) return 0;
102130diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
102131index b95699f..5fee919 100644
102132--- a/net/netfilter/ipvs/ip_vs_core.c
102133+++ b/net/netfilter/ipvs/ip_vs_core.c
102134@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
102135 ret = cp->packet_xmit(skb, cp, pp);
102136 /* do not touch skb anymore */
102137
102138- atomic_inc(&cp->in_pkts);
102139+ atomic_inc_unchecked(&cp->in_pkts);
102140 ip_vs_conn_put(cp);
102141 return ret;
102142 }
102143@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
102144 * Sync connection if it is about to close to
102145 * encorage the standby servers to update the connections timeout
102146 */
102147- pkts = atomic_add_return(1, &cp->in_pkts);
102148+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
102149 if (af == AF_INET &&
102150 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
102151 (((cp->protocol != IPPROTO_TCP ||
102152diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
102153index 02b2610..2d89424 100644
102154--- a/net/netfilter/ipvs/ip_vs_ctl.c
102155+++ b/net/netfilter/ipvs/ip_vs_ctl.c
102156@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
102157 ip_vs_rs_hash(dest);
102158 write_unlock_bh(&__ip_vs_rs_lock);
102159 }
102160- atomic_set(&dest->conn_flags, conn_flags);
102161+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
102162
102163 /* bind the service */
102164 if (!dest->svc) {
102165@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
102166 " %-7s %-6d %-10d %-10d\n",
102167 &dest->addr.in6,
102168 ntohs(dest->port),
102169- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
102170+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
102171 atomic_read(&dest->weight),
102172 atomic_read(&dest->activeconns),
102173 atomic_read(&dest->inactconns));
102174@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
102175 "%-7s %-6d %-10d %-10d\n",
102176 ntohl(dest->addr.ip),
102177 ntohs(dest->port),
102178- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
102179+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
102180 atomic_read(&dest->weight),
102181 atomic_read(&dest->activeconns),
102182 atomic_read(&dest->inactconns));
102183@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
102184 .open = ip_vs_info_open,
102185 .read = seq_read,
102186 .llseek = seq_lseek,
102187- .release = seq_release_private,
102188+ .release = seq_release_net,
102189 };
102190
102191 #endif
102192@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
102193 .open = ip_vs_stats_seq_open,
102194 .read = seq_read,
102195 .llseek = seq_lseek,
102196- .release = single_release,
102197+ .release = single_release_net,
102198 };
102199
102200 #endif
102201@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
102202
102203 entry.addr = dest->addr.ip;
102204 entry.port = dest->port;
102205- entry.conn_flags = atomic_read(&dest->conn_flags);
102206+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
102207 entry.weight = atomic_read(&dest->weight);
102208 entry.u_threshold = dest->u_threshold;
102209 entry.l_threshold = dest->l_threshold;
102210@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102211 unsigned char arg[128];
102212 int ret = 0;
102213
102214+ pax_track_stack();
102215+
102216 if (!capable(CAP_NET_ADMIN))
102217 return -EPERM;
102218
102219@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
102220 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
102221
102222 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
102223- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102224+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102225 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
102226 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
102227 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
102228diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
102229index e177f0d..55e8581 100644
102230--- a/net/netfilter/ipvs/ip_vs_sync.c
102231+++ b/net/netfilter/ipvs/ip_vs_sync.c
102232@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
102233
102234 if (opt)
102235 memcpy(&cp->in_seq, opt, sizeof(*opt));
102236- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102237+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102238 cp->state = state;
102239 cp->old_state = cp->state;
102240 /*
102241diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
102242index 30b3189..e2e4b55 100644
102243--- a/net/netfilter/ipvs/ip_vs_xmit.c
102244+++ b/net/netfilter/ipvs/ip_vs_xmit.c
102245@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
102246 else
102247 rc = NF_ACCEPT;
102248 /* do not touch skb anymore */
102249- atomic_inc(&cp->in_pkts);
102250+ atomic_inc_unchecked(&cp->in_pkts);
102251 goto out;
102252 }
102253
102254@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
102255 else
102256 rc = NF_ACCEPT;
102257 /* do not touch skb anymore */
102258- atomic_inc(&cp->in_pkts);
102259+ atomic_inc_unchecked(&cp->in_pkts);
102260 goto out;
102261 }
102262
102263diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
102264index d521718..d0fd7a1 100644
102265--- a/net/netfilter/nf_conntrack_netlink.c
102266+++ b/net/netfilter/nf_conntrack_netlink.c
102267@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
102268 static int
102269 ctnetlink_parse_tuple(const struct nlattr * const cda[],
102270 struct nf_conntrack_tuple *tuple,
102271- enum ctattr_tuple type, u_int8_t l3num)
102272+ enum ctattr_type type, u_int8_t l3num)
102273 {
102274 struct nlattr *tb[CTA_TUPLE_MAX+1];
102275 int err;
102276diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
102277index f900dc3..5e45346 100644
102278--- a/net/netfilter/nfnetlink_log.c
102279+++ b/net/netfilter/nfnetlink_log.c
102280@@ -68,7 +68,7 @@ struct nfulnl_instance {
102281 };
102282
102283 static DEFINE_RWLOCK(instances_lock);
102284-static atomic_t global_seq;
102285+static atomic_unchecked_t global_seq;
102286
102287 #define INSTANCE_BUCKETS 16
102288 static struct hlist_head instance_table[INSTANCE_BUCKETS];
102289@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
102290 /* global sequence number */
102291 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
102292 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
102293- htonl(atomic_inc_return(&global_seq)));
102294+ htonl(atomic_inc_return_unchecked(&global_seq)));
102295
102296 if (data_len) {
102297 struct nlattr *nla;
102298diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
102299new file mode 100644
102300index 0000000..b1bac76
102301--- /dev/null
102302+++ b/net/netfilter/xt_gradm.c
102303@@ -0,0 +1,51 @@
102304+/*
102305+ * gradm match for netfilter
102306